Dataset columns (types and observed value ranges):
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: sequence (length 30)
negative_scores: sequence (length 30)
document_score: string (length 4 to 10)
document_rank: string (2 distinct values)
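As a rough sketch of how rows with this schema might be loaded and inspected, assuming the data is published as a Hugging Face dataset (the repository id below is a placeholder, not the real one):

import datasets

# Placeholder repository id; substitute the actual dataset path.
ds = datasets.load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language description of the code
print(row["document"])               # the positive code snippet
print(len(row["negatives"]))         # 30 hard-negative snippets
print(len(row["negative_scores"]))   # 30 scores aligned with the negatives
print(row["document_score"], row["document_rank"])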
Return a filepath that contains data about the next change to test.
def get_next_change_file():
    path = '/tmp/perf/'
    changes_to_test = _sorted_ls(path)
    if changes_to_test:
        return os.path.join(path, changes_to_test[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def target_test_file_source_content():\n return 'changed'", "def target_test_file_content():\n return 'initial content'", "def GetRerunContextFile(self):\n if not self.prev_test_context or not self.prev_test_context.test_resources:\n return None\n return self.prev_test_context.test_resources[0]", "def _get_new_measurement_path() -> pathlib.Path:\n today = strftime(\"%Y%m%d\")\n today_path = DATA_DIR / today\n new_path = get_unique_path(today_path, 'measurement_{:03d}')\n return new_path", "def get_test_file_path(self):\n xml_file_path_prefix = \"./tests/\"\n return xml_file_path_prefix + self.test_name + \"_data/\"", "def get_testdata(file_name):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"testdata\",\n file_name)", "def _GetChangePath(change):\n return 'changes/%s' % str(change).replace('/', '%2F')", "def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file", "def get_current_file(self):\n#-----------on attend la fin de creation du fichier Nexus\n \n while self._ismoving():\n self.logger.debug(\"DataRecorder creat Nexus file\") \n time.sleep(1.0)\n return self.dp.currentFiles[0]", "def path(pathstring='/data'):\n camera.status.path = pathstring.strip()\n logger.info('Next file name: '+os.path.join(camera.status.path,camera.status.nextfile))", "def source_test_file_name():\n return 'feature'", "def target_test_file_name():\n return 'test'", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def get_history_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, histories.DEFAULT_FILENAME_HISTORY)", "def get_sample_swap_file(self) -> Path:\n return self.flow_data_paths.swap_path", "def get_test_path():\n path, name = os.path.split(__file__)\n return os.path.join(path,\"..\", 'test-data')", "def CurrentDataFile(self):\n if self.force_auto_sync:\n self.get('CurrentDataFile')\n return self._CurrentDataFile", "def current_buildfile(self):\r\n return self._active_buildfile", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def _fixture(self):\n fdir = os.path.join(FIXTURES_DIR, 'errata.devel.redhat.com/')\n filename = self._url_with_params.replace(\n 'https://errata.devel.redhat.com/', fdir)\n # If we need to represent this API endpoint as both a directory and a\n # file, check for a \".body\" file.\n if os.path.isdir(filename):\n return filename + '.body'\n return filename", "def getCurrentStep():", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def current_file():\n george_script = \"tv_GetProjectName\"\n return CommunicationWrapper.execute_george(george_script)", "def get_file_save_path(self):\n return self.out", "def current_step(self):\n try:\n last_line = tail(path.join(self.run_dir, \"out.txt\"), 8)\n except FileNotFoundError:\n return -1\n if not last_line: # Empty file\n return -1\n if re.search(\"now at t\", last_line[-1]):\n # Unless the line was incomplete, 
there should be a match with:\n a = re.match(r\".* n = *(.*?)$\", last_line[-1])\n if a:\n return int(a.group(1))\n # Otherwise, try the previous one\n a = re.match(r\".* n = *(.*?)$\", last_line[-2])\n if a:\n return int(a.group(1))\n else:\n return -1 # Some error exists in the file\n\n elif \" Osiris run completed normally\\n\" in last_line:\n return self.total_steps\n else:\n return -1", "def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name", "def current(self) -> str:\n return f\"{self.base}/data/{self.digest}/{self.images[self.cursor]}\"" ]
[ "0.6054829", "0.6054829", "0.6054829", "0.58682424", "0.58409095", "0.57942855", "0.57845366", "0.56979823", "0.5621857", "0.56051517", "0.5566274", "0.5565939", "0.5536013", "0.5490917", "0.54188967", "0.5408854", "0.53556013", "0.5328166", "0.53233224", "0.5313902", "0.53011644", "0.5301147", "0.529975", "0.5288279", "0.5281193", "0.5278165", "0.525051", "0.52467275", "0.5245046", "0.5238465" ]
0.77210146
0
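The metadata field above lists a single triplet objective over (query, document, negatives), which suggests each row is meant to be expanded into anchor/positive/negative triplets for contrastive training. A minimal sketch of that reading, assuming a row dict shaped like the example above (the helper name is illustrative, not part of the dataset):

def triplets_from_row(row):
    # Pair the query with its positive document against each hard negative.
    query = row["query"]
    positive = row["document"]
    for negative in row["negatives"]:
        yield (query, positive, negative)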
convert area in rad^2 to km^2
def area_rad_to_km(area_rad):
    r_earth = 6.37122e3  # SHR_CONST_REARTH, in km
    circ = 2*np.pi*r_earth
    foo = xr.ufuncs.sqrt(area_rad.copy())
    foo *= r_earth
    area_km = foo**2
    return area_km
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_ha_to_km2(nb):\n return nb / 100", "def rad(area) :\n return sqrt(area/pi)", "def km2m(km):\n return km * 1000", "def km2_area(polygons):\n\n reprojected_polygons = [reproject(p) for p in polygons]\n return ops.cascaded_union(reprojected_polygons).area * 1e-6", "def mi_to_m(radius):\n return int(float(radius) * 1609.34)", "def test_convert_to_km(self):\n assert meters_to_km(100) == \"0.10\"\n assert meters_to_km(494) == \"0.49\"\n assert meters_to_km(495) == \"0.49\"\n assert meters_to_km(496) == \"0.50\"\n assert meters_to_km(10000) == \"10.00\"\n assert meters_to_km(10200) == \"10.20\"", "def circumference_area(radius):\n return float('%.3f'%(radius * math.pi))", "def miles_to_kilometers(miles):\n #convert miles to km:\n return miles*1.60934", "def kilometers_to_miles(km):\n #convert km to miles:\n return km*0.621371", "def areaTriangulo(base,altura):\n return ((base*altura)/2)", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def earth_radius(units=\"m\"):\n if \"m\" == units:\n return 6371000\n elif \"km\" == units:\n return 6371\n elif \"mi\" == units:\n return 3959", "def circle_area(radius : number) -> number:\n area = pi*radius*radius\n #print(\"The area of circle is =\", area, \"sq.units\")\n return area", "def convert_area(self, event):\n try:\n #Compare other unit to one unit(square meters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acres\": 4046.8564224, \"ares\" :100.0, \"circular inches\": 0.0005067, \"hectares\": 10000.0, \"hides\": 485000.0, \"roods\": 1011.7141056, \"square centimeters\": 0.0001, \"square feet(US & UK)\": 0.092803, \"square feet(US survey)\": 0.092803, \"square inches\": 0.000645, \"square kilometers\": 1000000.0, \"square meters\": 1.0, \"square miles\": 2589988.110336, \"square millimeters\": 0.000001, \"square of timber\": 9.280304, \"square rods or poles\": 25.29285264, \"square yards\": 0.83612736, \"townships\": 93239571.972}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def haversine(lat1, lon1, lat2, lon2):\n\t\t # convert decimal degrees to radians \n\t\t lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t\t # haversine formula \n\t\t dlon = lon2 - lon1 \n\t\t dlat = lat2 - lat1 \n\t\t a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\t\t c = 2 * asin(sqrt(a)) \n\t\t km = 6367 * c\n\t\t return km", "def ponderar(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def calculate_area(radius: int) -> None:\n\n # process\n area = math.pi * radius ** 2\n\n # output\n print(f\"The area is {area:.2f} cm²\")", "def area_circle(radius):\n area = PI * radius**2\n return '{:.4f}'.format(area)", "def areaTriangulo(base,altura):\n\treturn (base*altura)/2", "def rad(x) :#en mm!\r\n return topdia(x)/2.0", "def M(latitude):\n return a*(1.0-e2)/pow((1.0-e2)*pow(math.sin(latitude),2.0),3.0/2.0);", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n return self.radius*self.radius*math.pi", "def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km", "def haversine(lat2, lon2):\n\n lat1 = 53.342998628\n lon1 = -6.256165642\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n\n return km", "def spherearea(dia):\n r = dia*1e-4 # convert to cm\n return(4*np.pi*r**2)", "def miles_to_radians(value):\n NAUTICAL_MILE_CONV = 0.868976 # convert miles to nautical miles\n nmiles = float(value)*NAUTICAL_MILE_CONV\n return nmiles*math.pi/(180.0*60.0)", "def haversine(lon1, lat1, lon2, lat2): \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \r\n #print 34\r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \r\n c = 2 * atan(sqrt(a)/sqrt(1-a)) \r\n r = 6371 \r\n d=c * r\r\n #print type(d)\r\n return d" ]
[ "0.71732515", "0.6853367", "0.6772503", "0.6647387", "0.65435016", "0.65118784", "0.6350859", "0.6348541", "0.63110465", "0.622082", "0.61698097", "0.61480993", "0.61387134", "0.6091799", "0.60879374", "0.60483783", "0.60402596", "0.60270166", "0.60109943", "0.6004886", "0.59828997", "0.5966955", "0.5966955", "0.5965002", "0.5949714", "0.5928514", "0.59160316", "0.59158427", "0.5910243", "0.5908653" ]
0.80855316
0
Blindly sets state based on the items like statedict
def __setstate__(self, statedict):
    for k, v in list(statedict.items()):
        setattr(self, k, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_state( self ):", "def set_state(canvas, state):\n for key, value in state.items():\n set_attribute(canvas, key, value)", "def __setstate__(self, state):\n exclude_keys = ['_is_zero', '_is_positive', '_is_nonneg']\n for key,value in state.items():\n if key in exclude_keys:\n continue\n if key == '_field':\n self._init_field(value)\n continue\n self.__dict__[key] = value\n return state", "def make_state(self, content, initial):\n if initial in (True, False):\n return {name:initial for name in content}\n else:\n return {name:state for (name,state) in zip(content, initial)}", "def __setstate__(self, state):\n\n self.list = state", "def __setstate__(self, state):\n\n self.list = state", "def __setstate__(self, state):\n self.__dict__.update(state)", "def __setstate__(self, state):\n self.__dict__.update(state)\n for y in ['strains', 'alleles', 'base_cobra_model']:\n for x in getattr(self, y):\n x._model = self\n if not hasattr(self, \"name\"):\n self.name = None", "def __setstate__(self, state):\n # Restore instance attributes\n try: \n obj = Thing.ID_dict[state['id']] # is this obj already in dict?\n dbg.debug(\"Note: %s already in Thing.ID_dict, maps to %s\" % (state['id'], obj))\n except KeyError: # Not already in dict\n Thing.ID_dict[state['id']] = self\n if 'has_beat' in state:\n Thing.game.register_heartbeat(self)\n self.__dict__.update(state)", "def set_state(self, state: int):", "def toState(attrs=ALL):", "def set_states(self, item, short=False):\n if short or self.tail_batch is None:\n self.memory.set('states', self.s, self.e, item)\n else:\n bl = len(self.tail_batch)\n self.memory.set('states', self.s, self.e, item[:-bl])\n self.tail_batch['states'] = item[-bl:]", "def __setstate__(self, state):\n\n for key, value in state.items():\n if key in self.__slots__:\n setattr(self, key, value)", "def setState(self):\n\t\tself.stateDict = {'playerlist': self.playerList[:], \\\n\t\t\t\t\t\t\t'comcards': self.communityCards[:], \\\n\t\t\t\t\t\t\t'pots':\t\tself.pots[:], \\\n\t\t\t\t\t\t\t'curbet':\tself.currentBet[:], \\\n\t\t\t\t\t\t\t'turn':\t\tself.turn, \\\n\t\t\t\t\t\t\t'isGameEnd': self.isGameEnd}", "def __setstate__(self, _state : dict):\n self.__init__(**_state)", "def fromState(state):", "def updateList(self):\n for state in list_:\n state.update(True)", "def __call__(self, **kwargs):\r\n for item, value in kwargs.items():\r\n if item not in self._state[\"data\"]:\r\n self._state[\"data\"][item] = value", "def __call__(self, **kwargs):\r\n for item, value in kwargs.items():\r\n if item not in self._state[\"data\"]:\r\n self._state[\"data\"][item] = value", "def __setstate__(self, state):\n super().__setstate__(state)\n\n self.annotations = SortedList(self.annotations)\n self.links = SortedList(self.links)\n self.groups = SortedList(self.groups)\n self.generics = SortedList(self.generics)\n\n self.index = DataIndex()\n self.index.update_basic_index(list(self.annotations))\n self.index.update_basic_index(list(self.links))\n self.index.update_basic_index(list(self.groups))\n self.index.update_basic_index(list(self.generics))\n\n for a in self.annotations:\n a.set_pack(self)\n\n for a in self.links:\n a.set_pack(self)\n\n for a in self.groups:\n a.set_pack(self)\n\n for a in self.generics:\n a.set_pack(self)", "def checkitem_states(self):\r\n return CheckItemStates(self)", "def __call__(self, **kwargs):\n for item, value in kwargs.items():\n if item not in self._state[\"data\"]:\n self._state[\"data\"][item] = value", "def __call__(self, **kwargs):\n for item, value in 
kwargs.items():\n if item not in self._state[\"data\"]:\n self._state[\"data\"][item] = value", "def __call__(self, **kwargs):\n for item, value in kwargs.items():\n if item not in self._state[\"data\"]:\n self._state[\"data\"][item] = value", "def __setstate__(self, d):\n\t\tself.__dict__ = d", "def preset_items(self):\r\n\r\n raise NotImplementedError", "def __setstate__(self, state: Dict[str, Any]):\n self.__dict__.update(state)\n self.__dict__['__db'] = None", "def __setstate__(self,state):\n self.__dict__.update(state)\n self.KDTreeFinder = spatial.KDTree(self.featureVals)", "def __setstate__(self, state):\n self.__dict__ = dict(state)\n self._init_compiled()", "def state(self, newState):\n for i in range(len(newState)):\n self._state[i] = newState[i]" ]
[ "0.6640367", "0.6613323", "0.6503467", "0.64254403", "0.6403489", "0.6403489", "0.61876637", "0.61295795", "0.6122762", "0.61179495", "0.60977346", "0.6040049", "0.60339725", "0.6022381", "0.5904621", "0.5903245", "0.58718616", "0.5868567", "0.5868567", "0.5859888", "0.5841554", "0.5838092", "0.5838092", "0.5838092", "0.5793427", "0.578515", "0.57801", "0.57758725", "0.5773722", "0.57712317" ]
0.6726932
1
Create a new enum class with the given names and values.
def Enum(name,names,values=None):
    e = new.classobj(name,(EnumBase,),{})
    e._initialize(names,values)
    return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_enum(name, fields, values=None):\n Enum = namedtuple(name, fields)\n if values is None:\n return Enum(*fields)\n return Enum(*values)", "def Enum(name,names,values=None):\n e = type(name,(EnumBase,),{})\n e._initialize(names,values)\n return e", "def enum(cls, options, values):\r\n names, real = zip(*options)\r\n del names # unused\r\n\r\n def factory(i, name):\r\n return cls(i, name, (len(real),), lambda a: real[a[0]], values)\r\n return factory", "def tenum(name, vals):\n\n if isinstance(vals, basestring):\n vals = vals.split()\n sort, consts = z3.EnumSort(name, vals)\n fields = dict(zip(vals, consts))\n fields[\"_z3_sort\"] = sort\n return type(name, (SEnumBase, SymbolicVal), fields)", "def def_enum(dct, name):\n return type(name, (Enum,), dct)", "def from_value(cls, value):\n value = value if value else 0\n try:\n flags = [flag.name for flag in cls.enum_class if flag.value & value]\n except TypeError:\n flags = [flag.name for flag in cls.enum_class if flag.name == value]\n\n return cls(*flags)", "def enum(**enums):\n return type('Enum', (), enums)", "def enum(**enums):\n return type('Enum',(),enums)", "def from_string(cls, name: str) -> Enum:", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n return type('Enum', (), enums)", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n return type('Enum', (), enums)", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n return type('Enum', (), enums)", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n return type('Enum', (), enums)", "def LabeledEnum(class_name: str, names: list) -> Enum:\n original_frame = sys._getframe().f_back\n module = original_frame.f_globals['__name__']\n qualname = '.'.join((module, class_name))\n if len(names[0]) == 3:\n enum_names = [item[:2] for item in names]\n label_index = 2\n else:\n enum_names = [(item[0], item[0]) for item in names]\n label_index = 1\n new_enum = SafePickeableEnum(class_name, names=enum_names, module=module, qualname=qualname)\n for enum_item, item in zip(new_enum, names):\n enum_item.label = item[label_index]\n return new_enum", "def from_python_enum_direct_values(cls, enum, name=None):\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v.value) for v in enum])", "def from_python_enum(cls, enum, name=None):\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v) for v in enum])", "def test_enum(self):\n\n # XXX should test null or empty lists, ill-formed names\n name = 'george'\n pairs = [('abc', 3), ('def', 5), ('ghi', 7)]\n enum = M.EnumSpec.create(name, pairs)\n # self.assertEqual( ','.join(pairs), enum.__repr__())\n self.assertEqual(3, enum.value('abc'))\n self.assertEqual(5, enum.value('def'))\n self.assertEqual(7, enum.value('ghi'))", "def build_class(classname, values):\n values['FIELDS'] = [x for x in values.keys()]\n return type(classname, (object,), values)", "def __new__(mcs, cls, bases, classdict, **kwds):\n enum_class = super().__new__(mcs, cls, bases, classdict, **kwds)\n copied_member_map = dict(enum_class._member_map_)\n enum_class._member_map_.clear()\n for k, v in copied_member_map.items():\n enum_class._member_map_[k.lower()] = v\n return enum_class", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = dict((value, key) for key, 
value in list(enums.items()))\n enums['reverse_mapping'] = reverse\n return type(str('Enum'), (), enums)", "def enum(cls):\n\n assert cls.__bases__ == (object,)\n\n d = dict(cls.__dict__)\n new_type = type(cls.__name__, (int,), d)\n new_type.__module__ = cls.__module__\n\n map_ = {}\n for key, value in iteritems(d):\n if key.upper() == key and isinstance(value, integer_types):\n value_instance = new_type(value)\n setattr(new_type, key, value_instance)\n map_[value] = key\n\n def str_(self):\n if self in map_:\n return \"%s.%s\" % (type(self).__name__, map_[self])\n return \"%d\" % int(self)\n\n def repr_(self):\n if self in map_:\n return \"<%s.%s: %d>\" % (type(self).__name__, map_[self], int(self))\n return \"%d\" % int(self)\n\n setattr(new_type, \"__repr__\", repr_)\n setattr(new_type, \"__str__\", str_)\n\n return new_type", "def sequential_enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n return type('Enum', (), enums)", "def enum(**enums):\n reverse = dict((value, key) for key, value in iteritems(enums))\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)", "def __init__(\n self,\n name,\n namespace,\n symbols,\n names=None,\n doc=None,\n other_props=None,\n ):\n symbols = tuple(symbols)\n symbol_set = frozenset(symbols)\n if (len(symbol_set) != len(symbols)\n or not all(map(lambda symbol: isinstance(symbol, _str), symbols))):\n raise AvroException(\n 'Invalid symbols for enum schema: %r.' % (symbols,))\n\n super(EnumSchema, self).__init__(\n data_type=ENUM,\n name=name,\n namespace=namespace,\n names=names,\n other_props=other_props,\n )\n\n self._props['symbols'] = symbols\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, raw_enum: Dict):\n self.name: str = raw_enum.get(\"name\")\n self.description: str = raw_enum.get(\"description\")\n self.is_deprecated: bool = raw_enum.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_enum.get(\"deprecationReason\")", "def IntEnum(name, keys, start=1):\n return IntEnumBase(name,\n [(key, index) for index, key in enumerate(keys, start=start)])", "def __new__(cls, index):\n # If is enum type of this class, return it.\n if isinstance(index, cls):\n return index\n\n # If number, look up by number.\n if isinstance(index, six.integer_types):\n try:\n return cls.lookup_by_number(index)\n except KeyError:\n pass\n\n # If name, look up by name.\n if isinstance(index, six.string_types):\n try:\n return cls.lookup_by_name(index)\n except KeyError:\n pass\n\n raise TypeError('No such value for %s in Enum %s' %\n (index, cls.__name__))", "def __init__(self, name, number=None):\n # Immediately return if __init__ was called after _Enum.__init__().\n # It means that casting operator version of the class constructor\n # is being used.\n if getattr(type(self), '_DefinitionClass__initialized'):\n return\n object.__setattr__(self, 'name', name)\n object.__setattr__(self, 'number', number)", "def check_enum(self, name, values):\n v = self.__dict__.get(name)\n if v not in values:\n raise ValueError(\n \"Invalid value: {0}='{1}', not in '{2}'\".format(name, v, values))", "def __init__(self, node, declare):\n symbol.__init__(self, node, declare, \"enum\", \"Enumeration\")\n # check if values are required, must be true or false\n val_req = getOptionalTag(node, \"valuesRequired\", \"false\")\n if val_req == \"false\":\n self.val_req = False\n elif val_req == \"true\":\n self.val_req = True\n else:\n err = \"Enumeration field 'valueRequired' must be either 'true' or 
'false'.\\n\"\n err += \"Got: %s in node:\\n %s\" % (val_req, node.toxml())\n raise Exception(err)\n\n self.entries = []\n members = getNode(node, \"members\")\n for entry in filter(lambda n: n.nodeType == n.ELEMENT_NODE, members.childNodes):\n ent = declare( entry )\n if ent.getType() != \"enumEntry\":\n raise Exception(\"Incorrect entry '\"+ent.getType()+\"' found in enumeration:\\n\"+node.toxml())\n self.entries.append(ent)" ]
[ "0.8069038", "0.77621424", "0.7693179", "0.67224294", "0.64560413", "0.64129335", "0.6399653", "0.6381578", "0.6362524", "0.6356486", "0.6356486", "0.6356486", "0.6356486", "0.6295191", "0.6264949", "0.622943", "0.61205715", "0.6103254", "0.6100767", "0.6011548", "0.597129", "0.59065425", "0.5879894", "0.58455706", "0.57994115", "0.57555294", "0.5613256", "0.5602002", "0.5582253", "0.55605346" ]
0.79247373
1
Check out a license feature from the license server ahead of time. checkoutlicense(self,feature_)
def checkoutlicense(self,feature_):
    res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkoutlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkoutlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def checkinlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkinlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def checkinlicense(self,feature_):\n res = __library__.MSK_XX_checkinlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def test_checkout_repository(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.contribtool.checkout_repository(TOOLNAME,username,userpass)", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def checkout(self): \n mtool = getToolByName(self.context, \"portal_membership\")\n ICheckoutManagement(self.context).redirectToNextURL(\"AFTER_START\")", "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def execute(self):\r\n _logger.info(\"=== Stage=checkout = %s\" % self._config.name)\r\n _logger.info(\"++ Started at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n # for testing: result = session.create(\"ppd_sw-fa1f5132#wbernard2:project:sa1spp#1\")\r\n if (result != None):\r\n _logger.info(\"Project found: '%s'\" % result)\r\n\r\n # setting up the project\r\n self.__setup_project(project, result)\r\n else:\r\n _logger.info(\"Checking out from '%s'.\" % project)\r\n \r\n purpose = None\r\n if self._config.has_key('purpose'):\r\n purpose = self._config['purpose']\r\n _logger.info(\"Using purpose: '%s'\" % purpose)\r\n \r\n version = None\r\n if self._config.has_key('version'):\r\n version = self._config['version']\r\n _logger.info(\"Using version: '%s'\" % version)\r\n\r\n try:\r\n if (not self._config.get_boolean('use.default_wa_path', True)):\r\n wa_path = self._config['dir']\r\n _logger.info(\"Using work area path to checkout directly\")\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose, path=wa_path)\r\n else:\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose)\r\n ccm.log_result(result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n self.__setRole(session)\r\n except ccm.CCMException, exc:\r\n ccm.log_result(exc.result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n raise exc\r\n finally:\r\n self.__restoreRole(session)\r\n _logger.info('Checkout complete')\r\n \r\n if result.project != None and result.project.exists(): 
\r\n _logger.info(\"Project checked out: '%s'\" % result.project)\r\n \r\n try:\r\n self.__setRole(session)\r\n _logger.info(\"Maintaining the workarea...\")\r\n if self.get_threads() == 1:\r\n output = result.project.work_area(True, True, True, self._config['dir'], result.project.name)\r\n else:\r\n output = ccm.extra.FastMaintainWorkArea(result.project, self._config['dir'], result.project.name, self.get_threads())\r\n ccm.log_result(output, ccm.CHECKOUT_LOG_RULES, _logger)\r\n finally:\r\n self.__restoreRole(session)\r\n self.__setup_project(project, result.project)\r\n else:\r\n raise Exception(\"Error checking out '%s'\" % project)\r\n\r\n _logger.info(\"++ Finished at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))", "def checkout(self, checkout):\n\n self._checkout = checkout", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? (y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def activate_license(self):\n response = self.client.activate_license()\n if str(response[\"result\"][\"code\"]) == \"0\" and str(response[\"data\"][\"LicenseActiveResult\"]) == \"0\":\n self.module.exit_json(msg=\"Activate license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Activate license file fail.{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def accept_license():\r\n msg, status = \"\", True\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'Click on license accept button'\r\n flag1 = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n \r\n \r\n\r\n status = False if not (flag1) else True\r\n else:\r\n \r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def checkout(self, checkout, *args):\n return self.cmd('checkout', checkout, *args)", "def CheckProductAndLicense():\n\n try:\n if arcpy.CheckExtension(\"Spatial\") == \"Available\": # check if spatial analyst extension is available\n arcpy.CheckOutExtension(\"Spatial\") # check out extension if available\n\n else: # spatial analyst extension is not available\n raise LicenseError # raise license error\n\n except LicenseError: # print customized message if license error raised\n arcpy.AddMessage(\"Spatial Analyst license is unavailable. Terminate the process.\")\n print(\"Spatial Analyst license is unavailable. 
Terminate the process.\")\n sys.exit()\n\n except arcpy.ExecuteError: # if other error encountered, print execution message\n arcpy.AddMessage(arcpy.GetMessages(2))\n print(arcpy.GetMessages(2))", "def license(self, license):\n\n self._license = license", "def put(self, license_handler):\n\n full_license = request.data\n return license_handler.upload_license(full_license)", "def licensecleanup(): # 3\n res = _msk.Env.licensecleanup()\n if res != 0:\n raise Error(rescode(res),\"\")", "def checkout(self, dbapi_connection, connection_record, connection_proxy):", "def test_59_help_license(self):\r\n url = \"/help/license\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a help license page\"\r\n assert \"Licenses\" in res.data, err_msg", "def query_active_license(self):\n response = self.client.query_active_license()\n if str(response[\"result\"][\"code\"]) == \"0\":\n if str(response[\"data\"][\"FileExist\"]) == \"0\":\n self.module.exit_json(msg=\"License file exists.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"License file not exists.You should add the License file first.Your License \"\n \"Serial No is: {0}\".format(response[\"data\"][\"LicenseSerialNo\"]),\n changed=False, status='fail')\n else:\n self.module.fail_json(msg=\"Query active licenses in batches has an error.\"\n \"{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def checkout(self, timeout):\n\n if not 0 < timeout <= BespokeGlobals.MAX_CHECKOUT_TIME:\n raise FatalError(\"Timeout is out of range!\")\n elif self._in_use and (datetime.now() < self._lock_expiration):\n raise CoreError(\"This SystemUnderTest is in use currently!\")\n elif self._in_use and (datetime.now() > self._lock_expiration):\n # A lock time out occurred and we need to force a checkin first.\n self.checkin()\n\n self._in_use = True\n self._lock_expiration = datetime.now() + timedelta(seconds=timeout)\n\n self._machine.setup()", "def releaseLicence(self):\n\t\t\tpulpCPLEX.releaseLicence()", "def licensecleanup():\n res = __library__.MSK_XX_licensecleanup()\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def svn_client_checkout(svn_revnum_t_result_rev, char_URL, char_path, svn_opt_revision_t_revision, svn_boolean_t_recurse, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def _supplySupply(self, url, compute_node_id, state):\n compute_node_document = self.getPortalObject().portal_catalog.getComputeNodeObject(compute_node_id)\n compute_node_document.requestSoftwareRelease(software_release_url=url, state=state)", "def whmcs_license(username=None, lkey=None, action=\"view\"):\n actions_list = ['view', 'add', 'transfer', 'del']\n\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # build request\n action = action.lower()\n if action not in actions_list:\n print(\"!! Invalid action: %s\" % (action))\n print(\" Valid actions are: %s\" % (', '.join(actions_list)))\n return False\n\n if action == 'view':\n if username is not None:\n sterm = username\n stype = 'user'\n elif lkey is not None:\n sterm = lkey\n stype = 'key'\n else:\n print(\"!! 
Must specify either username or lkey\")\n return False\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'query': stype, 'term': sterm})\n\n elif action == 'add':\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'user': username})\n\n elif action == 'del' or action == 'transfer':\n\n if not lkey:\n # lookup the license first\n kresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': 'view', 'query': 'user', 'term': username})\n check_sc_login(kresp.text)\n\n try:\n ktext = kresp.text.replace('<br />', '\\n').replace('<font size=\"3pt\">', '').replace('</font>', '').strip()\n lkey = re.search(r'\\WLicense Key: (Leased-.+)\\W', ktext, re.I|re.M).group(1)\n except:\n print(\"!! Unable to determine license key for user\")\n return False\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'key': license})\n\n # check login\n check_sc_login(lresp.text)\n\n # clean up response\n ltext = lresp.text.replace('<br />', '\\n').replace('<font size=\"3pt\">', '').replace('</font>', '').strip()\n\n print(\"** Got response from SC:\\n%s\" % (ltext))\n\n return lresp", "def test_link_to_checkout(self):\n self.browser.find_element_by_link_text('Checkout').click()\n self.assertEqual(self.browser.current_url,\n self.live_server_url + self.CHECKOUT_URL)", "def productactivate():\n pass", "def ProcessCheckDeviceLicenseRequest(self):\n response = dm.DeviceManagementResponse()\n license_response = response.check_device_license_response\n policy = self.server.GetPolicies()\n selection_mode = dm.CheckDeviceLicenseResponse.ADMIN_SELECTION\n if ('available_licenses' in policy):\n available_licenses = policy['available_licenses']\n selection_mode = dm.CheckDeviceLicenseResponse.USER_SELECTION\n for license_type in available_licenses:\n license = license_response.license_availabilities.add()\n license.license_type.license_type = LICENSE_TYPES[license_type]\n license.available_licenses = available_licenses[license_type]\n license_response.license_selection_mode = (selection_mode)\n\n return (200, response)" ]
[ "0.8759661", "0.76959074", "0.7425138", "0.5600213", "0.5537508", "0.5521356", "0.548995", "0.54747057", "0.5455874", "0.5384995", "0.53284734", "0.5324986", "0.5264816", "0.511373", "0.5108549", "0.5074378", "0.50742406", "0.506557", "0.49946", "0.4989902", "0.49843487", "0.4963812", "0.49341097", "0.49045157", "0.49025765", "0.48976263", "0.48842925", "0.48662648", "0.48615", "0.48608255" ]
0.8638246
1
Check in a license feature back to the license server ahead of time. checkinlicense(self,feature_)
def checkinlicense(self,feature_):
    res = __library__.MSK_XX_checkinlicense(self.__nativep,feature_)
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkinlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkinlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def checkoutlicense(self,feature_):\n res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def checkoutlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkoutlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? (y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def accept_license():\r\n msg, status = \"\", True\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'Click on license accept button'\r\n flag1 = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n \r\n \r\n\r\n status = False if not (flag1) else True\r\n else:\r\n \r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def activate_license(self):\n response = self.client.activate_license()\n if str(response[\"result\"][\"code\"]) == \"0\" and str(response[\"data\"][\"LicenseActiveResult\"]) == \"0\":\n self.module.exit_json(msg=\"Activate license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Activate license file fail.{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def CheckProductAndLicense():\n\n try:\n if arcpy.CheckExtension(\"Spatial\") == \"Available\": # check if spatial analyst extension is available\n arcpy.CheckOutExtension(\"Spatial\") # check out extension if available\n\n else: # spatial analyst extension is not available\n raise LicenseError # raise license error\n\n except LicenseError: # print customized message if license error raised\n arcpy.AddMessage(\"Spatial Analyst license is unavailable. Terminate the process.\")\n print(\"Spatial Analyst license is unavailable. 
Terminate the process.\")\n sys.exit()\n\n except arcpy.ExecuteError: # if other error encountered, print execution message\n arcpy.AddMessage(arcpy.GetMessages(2))\n print(arcpy.GetMessages(2))", "def checkin(self, guest_name):\n pass", "def query_active_license(self):\n response = self.client.query_active_license()\n if str(response[\"result\"][\"code\"]) == \"0\":\n if str(response[\"data\"][\"FileExist\"]) == \"0\":\n self.module.exit_json(msg=\"License file exists.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"License file not exists.You should add the License file first.Your License \"\n \"Serial No is: {0}\".format(response[\"data\"][\"LicenseSerialNo\"]),\n changed=False, status='fail')\n else:\n self.module.fail_json(msg=\"Query active licenses in batches has an error.\"\n \"{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def checkin(self):\n\n if self._in_use:\n self._in_use = False\n self._lock_expiration = datetime.now()\n self._machine.tear_down()", "def ValidateLicense(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def checkin(self, checkin):\n\n self._checkin = checkin", "async def _do_work_claim(self) -> bool:\n # 1. Ask the LTA DB for the next Bundle to be deleted\n # configure a RestClient to talk to the LTA DB\n lta_rc = ClientCredentialsAuth(address=self.lta_rest_url,\n token_url=self.lta_auth_openid_url,\n client_id=self.client_id,\n client_secret=self.client_secret,\n timeout=self.work_timeout_seconds,\n retries=self.work_retries)\n self.logger.info(\"Asking the LTA DB for a Bundle to check for TransferRequest being finished.\")\n pop_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\"\n }\n response = await lta_rc.request('POST', f'/Bundles/actions/pop?source={self.source_site}&dest={self.dest_site}&status={self.input_status}', pop_body)\n self.logger.info(f\"LTA DB responded with: {response}\")\n bundle = response[\"bundle\"]\n if not bundle:\n self.logger.info(\"LTA DB did not provide a Bundle to check. 
Going on vacation.\")\n return False\n # update the TransferRequest that spawned the Bundle, if necessary\n await self._update_transfer_request(lta_rc, bundle)\n # even if we processed a Bundle, take a break between Bundles\n return False", "def check_in(self, data):\n data = clean(data, self.check_in_parameters)\n return self.put(\"/devices/checkin\", data)", "def __call__(self, feature):\n return self.is_enabled(feature)", "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n 'Folio {} is not yet to be processed'.format(self.folio_id.name)))\n hours, minutes = decimal_to_time(self.env.user.company_id.checkin_hour)\n can_check_in = datetime.combine(\n date.today(), tm(hours, minutes)) < datetime.now()\n if not can_check_in:\n raise UserError(\n 'Guest(s) cannot be checked in earlier than {}'.format(\n self.env.user.company_id.checkin_hour))\n if self.folio_id.room_id.occupy():\n self.folio_id.write({'state': 'checkin'})", "def _is_ticketing_handled(self, regform, **kwargs):\n return regform.cern_access_request is not None and regform.cern_access_request.is_active", "def check_license_applied(self, table, repo, license_id):\n views = LicenseView.objects.filter(\n table=table,\n repo_base=self.repo_base,\n repo_name=repo,\n license_id=license_id)\n\n if len(views) == 0:\n return False\n return True", "def checkin(self, dbapi_connection, connection_record):", "def ProcessCheckDeviceLicenseRequest(self):\n response = dm.DeviceManagementResponse()\n license_response = response.check_device_license_response\n policy = self.server.GetPolicies()\n selection_mode = dm.CheckDeviceLicenseResponse.ADMIN_SELECTION\n if ('available_licenses' in policy):\n available_licenses = policy['available_licenses']\n selection_mode = dm.CheckDeviceLicenseResponse.USER_SELECTION\n for license_type in available_licenses:\n license = license_response.license_availabilities.add()\n license.license_type.license_type = LICENSE_TYPES[license_type]\n license.available_licenses = available_licenses[license_type]\n license_response.license_selection_mode = (selection_mode)\n\n return (200, response)", "def checkin(self, message):\n if not validate_notification_message(message, CHECKIN_MESSAGE_FIELDS):\n raise ValueError('invalid message')\n\n self._submit('articlepkg_checkins', message)", "async def verify(self,ctx,ign='',region=''):\r\n if ign =='' or region =='':\r\n await self.bot.say(\"Please type in a ign and region.\")\r\n return\r\n if not ctx.message.channel.is_private: #Makes sure channel is private\r\n await self.bot.say('Sorry. 
But this process must be done in a private message, to continue please dm the bot ```{}```'.format(ctx.message.content))\r\n return\r\n try:\r\n pattern = verify.start(ctx.message.author.id, ign,region)\r\n except Exception as e:\r\n await self.bot.say('Error: ' + str(e)+'\\n\\nJoin http://discord.me for more info.')\r\n return\r\n pattern_ = '{} Halcyon Potions, {} Weapon Infusions, and {} Crystal Infusions'.format(str(pattern.count(0)), str(pattern.count(1)), str(pattern.count(2)))\r\n await self.bot.say(\"Awesome. To complete the authorization process.\\n• Enter a **blitz** match\\n• Buy **{}** for your first {} items.\\n• **You can sell them immediately at the same price.**\\n• This must be your next match.\\n• **Once you are done please type {}check to complete authorization process.** Once this is done, your account will be linked and authenticated permanantly.\".format(pattern_,len(pattern), self.bot.command_prefix[0]))\r\n\r\n await asyncio.sleep(345)\r\n\r\n await self.bot.send_message(ctx.message.author, verify.check(ctx.message.author.id))", "def put(self, license_handler):\n\n full_license = request.data\n return license_handler.upload_license(full_license)", "def demomode_accept_license():\r\n msg, status = \"\", True\r\n# import genericfunctions\r\n# genericfunctions.accept_license_function()\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n\r\n # agrment_lbl = ui_controls.text_view(get_obj_identifier('EUL_agrement_labl'))\r\n #if agrment_lbl.strip() =='End User License Agreement': \r\n #print \"End user License Agreement label is displaying properly\" \r\n #else:\r\n # print \"End user License Agreement label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('EUL_agrement_labl','End User License Agreement')\r\n sleep(4) \r\n \r\n\r\n #'get the text view of the Eula acknowledge agreement text'\r\n #Agrement_text_view = ui_controls.text_view(get_obj_identifier('EULA_acknowledge_agrmrnt_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_acknowldge_agrement_txt)\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Agrement_text_view.strip():\r\n #print \"DemoMode Eula agreement acknowledgement report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Eula agreement acknowledgement report is not verified sucessfully\" \r\n \r\n 'verify Eula acknowledge agreement text'\r\n flag2,msg = element_textvalidation('EULA_acknowledge_agrmrnt_text',text_to_verify)\r\n sleep(4) \r\n 'click on eula full view element' \r\n flag3 = ui_controls.Click(get_obj_identifier('EULA_full_view')) \r\n #'get the text view of the Eula whole agreement text'\r\n #Eula_text_view = ui_controls.text_view(get_obj_identifier('EULAagrement_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_EULA_agrement_txt)\r\n\r\n # if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n # if text_to_verify.strip() == Eula_text_view.strip():\r\n #print \"DemoMode Eula agreement report verified sucessfully\"\r\n #else:\r\n # print \"DemoMode Eula agreement device report verified unsucessfully\" \r\n 'verify Eula acknowledge agreement text'\r\n flag3,msg = element_textvalidation('EULAagrement_text',text_to_verify)\r\n sleep(4) \r\n\r\n 'Click on license accept 
button'\r\n flag4 = ui_controls.button(get_obj_identifier('agree'))\r\n 'verify diagnostics and usage label'\r\n #diagnotsic_usage_lbl = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_lbl'))\r\n #if diagnotsic_usage_lbl.strip() =='Diagnostics and Usage': \r\n #print \"Diagnostics and Usage label is displaying properly\" \r\n #else:\r\n #print \"Diagnostics and Usage label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag5,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n\r\n ''\r\n # Diagnostic_usge_txt_view = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_txt'))\r\n #if not Diagnostic_usge_txt_view:\r\n #print \"Unable to retrieve text of diagnostics and usage text from application\"\r\n # return False, msg\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode diagnostics and usage text file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Diagnostic_usge_txt_view .strip():\r\n # print \"DemoMode Diagnostics and Usage report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Diagnostics and Usage report verified unsucessfully\" \r\n \r\n 'verify end user license agreement label'\r\n flag6,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n sleep(4) \r\n flag7 = ui_controls.button(get_obj_identifier('agree'))\r\n\r\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag5 and flag6 and flag7) else True\r\n else:\r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('a'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def EnableLicenseCheck(self):\n return self._get_attribute('enableLicenseCheck')", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def check_in_book():\n book = request.form\n success_code = overdrive_apis.checkin_book(book)\n flash('The book was successfully checked in and is ready to be downloaded.')\n return render_template('book_details.html', list_of_books=book, what='checkout')", "def register_license(file_path):\n result = mjlib.mj_activate(file_path)\n return result" ]
[ "0.8578027", "0.7463757", "0.7390654", "0.5651141", "0.5581003", "0.5463787", "0.5270471", "0.52479464", "0.5114471", "0.5090737", "0.5089456", "0.503208", "0.49925858", "0.4985686", "0.4984008", "0.49691615", "0.49654573", "0.49629948", "0.49579397", "0.4957077", "0.49551493", "0.49267542", "0.49212673", "0.4905089", "0.48532104", "0.4846419", "0.48402217", "0.48330802", "0.4814681", "0.48101693" ]
0.8341613
1
Preallocates a thread pool. setupthreads(self,numthreads_)
def setupthreads(self,numthreads_):
    res = __library__.MSK_XX_setupthreads(self.__nativep,numthreads_)
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_workers(self, num_workers):\n self.pool = []\n\n for _ in range(num_workers):\n self.pool.append(Thread(target=self.threadloop))\n\n for a_thread in self.pool:\n a_thread.setDaemon(True)\n a_thread.start()", "def setNumThreads(self, num):\r\n # implement ThreadPool interface\r\n assert not self.prepared, \"You can't change number of threads for working server\"\r\n self.threads = num", "def setNumThreads(self, num):\r\n self.threads = num", "def setNumThreads(self, num):\n # implement ThreadPool interface\n assert not self.prepared, \"You can't change number of threads for working server\"\n self.threads = num", "def create_and_start_threads(self):\r\n self.create_threads()\r\n self.start_threads()", "def __init__(self, threads_count):\n\n self.queue = Queue(threads_count)\n\n self.threads = []\n self.device = None\n\n self.create_workers(threads_count)\n self.start_workers()", "def start_threads(self, sess, n_threads=4):\n threads = []\n print(\"starting %d data threads for training\" % n_threads)\n for n in range(n_threads):\n t = threading.Thread(target=self.thread_main, args=(sess,0,))\n t.daemon = True # thread will close when parent quits\n t.start()\n threads.append(t)\n # Make sure the queueu is filled with some examples (n = 500)\n num_samples_in_queue = 0\n while num_samples_in_queue < self.capacityTrain:\n num_samples_in_queue = sess.run(self.size_op)\n print(\"Initializing queue, current size = %i/%i\" % (num_samples_in_queue, self.capacityTrain))\n time.sleep(2)\n return threads", "def set_threadpool_size(nthreads):\n os.environ[\"OMP_THREAD_LIMIT\"] = \"0\" if nthreads is None else str(nthreads)", "def init(number_of_workers=0):\n global _wq, _use_workers\n\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n\n # if it is best to use zero workers, then use that.\n _wq = WorkerQueue(_use_workers)", "def __init__(__self__, *,\n threads_per_core: int):\n pulumi.set(__self__, \"threads_per_core\", threads_per_core)", "def construct_threads(self, process, flag):\n\t\tself.parallel_threads.append(self.prepare_batch(process, flag))", "def _process_threadpool_limits_initializier():\n import numpy # required for loky's autodetection\n from threadpoolctl import threadpool_limits\n\n threadpool_limits(limits=1)", "def start_thread_pool(cls):\n if cls.executor is None:\n cls.executor = ThreadPoolExecutor(max_workers=1)", "def setNumThreads(cls, numThreads: int):\n cls.NUMTHREADS = numThreads", "def __init__(self, numthreads):\n self.queue = Queue.Queue()\n for _ in range(numthreads):\n Worker(self.queue)\n logger.debug(\"Event worker pool started with %s threads.\" % numthreads)", "def setup_worker_threads(self):\n \n for thread_number in range(0, self.max_workers):\n worker = DeviceWorker(self, thread_number)\n self.worker_threads.append(worker)\n worker.start()", "def manager(num_thrds, num_loops):\n\n\tmutex.acquire()\n\tcnt.reset()\n\tmutex.release()\n\n\t# initialize the thread pool\n\tthread_pool = []\n\n\tfor i in range(num_thrds):\n\t\tthrd = threading.Thread(target=worker, args=(num_loops, cnt))\n\t\tthread_pool.append(thrd)\n\n\t# start threads\n\tfor i in range(len(thread_pool)):\n\t\tthread_pool[i].start()\n\n\tfor i in range(len(thread_pool)):\n\t\tthreading.Thread.join(thread_pool[i])\n\n\t#cnt.display()", "def _init_threads(self):\n\n self._init_hashers()\n self._queues = {}\n self._threads = {}\n\n for algo in self.algos:\n t = Thread(target=self._queue_updater, args=(algo,), name=algo)\n 
self._queues[algo] = Queue(MtHasher.QUEUE_SIZE)\n self._threads[algo] = t\n t.start()", "def __init__(self, *args, **kwargs):\n # count the cores available on the local machine\n self.tasks = mp.cpu_count()\n super(ParallelPreprocessor, self).__init__(*args, **kwargs)", "def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)", "def __init__(self, num_threads):\n\n self.num_threads = num_threads\n self.count_threads = self.num_threads\n self.cond = Condition()", "def setNthreads(self, nthreads=None):\n if nthreads is None:\n nthreads = 4\n lib._omp_set_num_threads(nthreads)", "def fill(self):\n for _ in range(Pyro4.config.THREADPOOL_MINTHREADS):\n if not self.attemptSpawn():\n break", "def setNThreads(self,n):\n assert(n>0)\n self._c_param.n_threads = n", "def start(self, nb_threads):\r\n # type: (int) -> None\r\n if self._active_threads:\r\n raise Exception('Threads already started.')\r\n\r\n # Create thread pool\r\n for _ in range(nb_threads):\r\n worker = threading.Thread(\r\n target=_work_function,\r\n args=(self._job_q, self._result_q, self._error_q))\r\n worker.start()\r\n self._thread_list.append(worker)\r\n self._active_threads += 1\r\n\r\n # Put sentinels to let the threads know when there's no more jobs\r\n [self._job_q.put(_ThreadPoolSentinel()) for _ in self._thread_list]", "def initialize_threading(self, worker_env=None):\n if not (os.path.exists(core.config.paths.zmq_public_keys_path) and\n os.path.exists(core.config.paths.zmq_private_keys_path)):\n logging.error(\"Certificates are missing - run generate_certificates.py script first.\")\n sys.exit(0)\n\n for i in range(NUM_PROCESSES):\n args = (i,)\n if worker_env:\n args = (i, worker_env,)\n\n pid = multiprocessing.Process(target=loadbalancer.Worker, args=args)\n pid.start()\n self.pids.append(pid)\n\n self.ctx = zmq.Context.instance()\n self.auth = ThreadAuthenticator(self.ctx)\n self.auth.start()\n self.auth.allow('127.0.0.1')\n self.auth.configure_curve(domain='*', location=core.config.paths.zmq_public_keys_path)\n\n self.load_balancer = loadbalancer.LoadBalancer(self.ctx)\n self.receiver = loadbalancer.Receiver(self.ctx)\n\n self.receiver_thread = threading.Thread(target=self.receiver.receive_results)\n self.receiver_thread.start()\n\n self.manager_thread = threading.Thread(target=self.load_balancer.manage_workflows)\n self.manager_thread.start()\n\n self.threading_is_initialized = True\n logger.debug('Controller threading initialized')\n gevent.sleep(0)", "def prepare(self):\r\n self.socket.listen()\r\n for _ in xrange(self.threads):\r\n thread = Worker(self.tasks)\r\n thread.setDaemon(True)\r\n thread.start()\r\n self.prepared = True", "def __init__(self, pool_size):\n \n self.pool_size=pool_size;", "def create_threads(self):\r\n name = self.short_name\r\n self.all_threads = []\r\n tf.train.add_queue_runner(tf.train.QueueRunner(self._preprocess_queue, [self._enqueue_op] * 2))\r\n\r\n def _create_and_register_thread(*args, **kwargs):\r\n thread = threading.Thread(*args, **kwargs)\r\n thread.daemon = True\r\n self.all_threads.append(thread)\r\n\r\n for i in range(self.num_threads):\r\n # File read thread\r\n _create_and_register_thread(target=self.read_entry_job, name='fread_%s_%d' % (name, i))\r\n\r\n # Preprocess thread\r\n _create_and_register_thread(target=self.preprocess_job,\r\n 
name='preprocess_%s_%d' % (name, i))\r\n\r\n if self.staging:\r\n # Send-to-GPU thread\r\n _create_and_register_thread(target=self.transfer_to_gpu_job,\r\n name='transfer_%s_%d' % (name, i))", "def init(with_threads=1):\n global threaded, _synchLockCreator, XLock\n\n if with_threads:\n if not threaded:\n if threadingmodule is not None:\n threaded = True\n\n class XLock(threadingmodule._RLock):\n def __reduce__(self):\n return (unpickle_lock, ())\n\n _synchLockCreator = XLock()\n else:\n raise RuntimeError(\n \"Cannot initialize threading, platform lacks thread support\"\n )\n else:\n if threaded:\n raise RuntimeError(\"Cannot uninitialize threads\")\n else:\n pass" ]
[ "0.67134285", "0.664693", "0.66045773", "0.65749764", "0.65331626", "0.6397342", "0.63319147", "0.6289505", "0.6270324", "0.6233809", "0.6203612", "0.6121451", "0.6111992", "0.61068666", "0.6090031", "0.60645485", "0.603164", "0.60059214", "0.5992909", "0.59382534", "0.59107", "0.58843577", "0.587981", "0.587479", "0.58335423", "0.57926667", "0.57748353", "0.57572645", "0.57445294", "0.5740935" ]
0.71301144
0
Obtains a short description of a response code. getcodedesc(code_)
def getcodedesc(code_): symname_ = (ctypes.c_char * value.max_str_len)() str_ = (ctypes.c_char * value.max_str_len)() res = __library__.MSK_XX_getcodedesc(code_,symname_,str_) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) _symname_retval = symname_.value.decode("utf-8",errors="replace") _str_retval = str_.value.decode("utf-8",errors="replace") return (_symname_retval,_str_retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_description(self, code):\n try:\n return self.message[str(code)]\n except KeyError:\n return \"Unknown (\" + str(code) + \")\"", "def get_error_description(self, code):\n self.c.execute(\"SELECT * FROM errorcode WHERE code=%d\" % code)\n return self.c.fetchone()[1]", "def describe(result_code):\n return _MESSAGES.get(result_code) or 'unknown error'", "def getcodedesc(code_): # 3\n if not isinstance(code_,rescode): raise TypeError(\"Argument code has wrong type\")\n arr_symname = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_symname = memoryview(arr_symname)\n arr_str = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_str = memoryview(arr_str)\n res,resargs = _msk.Env.getcodedesc(code_,memview_arr_symname,memview_arr_str)\n if res != 0:\n raise Error(rescode(res),\"\")\n retarg_symname,retarg_str = resargs\n retarg_str = arr_str.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n retarg_symname = arr_symname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_symname,retarg_str", "def getResponseString(retCode):\n return (_getResponseString(retCode))", "def errorNumToDesc(self, errorCode):\n for t in self.ERROR_CODES:\n if t[0] == errorCode:\n try:\n return t[2]\n except IndexError:\n return \"\"", "def _get_response_message(code=200, reason=None):\n return {'reason': reason}, code", "def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})", "def code(self) -> \"str\":\n return self._attrs.get(\"code\")", "def get_short_code():\n return rh.get_short_code(request)", "def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning", "def get_result_description(self, nErrCode, bIsBriefMessage = True, bFormated = False):\n\t\treturn call_sdk_function('PrlApi_GetResultDescription', nErrCode, bIsBriefMessage, bFormated)", "def _get_desc(self):\n return self.__desc", "def res_description(self):\n return self.get(\"res_description\", decode=True)", "def gen_estring(ecode):\n ec=atoi(str(ecode))\n if BaseHTTPRequestHandler.responses.has_key(ec):\n return \"HTTP/1.1 %s %s\" %(ec, BaseHTTPRequestHandler.responses[ec][0])\n else:\n return \"HTTP/1.1 %s\" %(ec)", "def decode_error_code(err_code, s, d):\n\n config.logger.warn('Failure: %d %s %s', err_code, s, d)\n\n return {\n 0: 'Request completed successfully. 
No error',\n 1: 'Invalid API key',\n 2: 'Unknown Request',\n 3: 'Invalid arguements',\n 4: 'Invalid service',\n 5: 'Invalid session',\n 6: 'Insufficient bandwidth available',\n 7: 'No path between src and dst with that service type',\n 8: 'Internal VELOX error',\n 9: 'Nothing to modify',\n -1: 'Server comms error',\n }.get(err_code, 'Unknown error code')", "def __str__(self):\n return self.code", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def code(self) -> str:\n return pulumi.get(self, \"code\")", "def getWlanReasonCodeString(reasonCode):\n rcStr = ''\n try:\n buf = create_unicode_buffer(256)\n bufSize = DWORD(256)\n ret = WlanReasonCodeToString( reasonCode, bufSize, buf, None)\n if ret != ERROR_SUCCESS:\n raise WinError(ret)\n rcStr = buf.value\n except Exception,err:\n print 'getWlanReasonCodeString() fail - err %s' % err\n rcStr = '**'\n return rcStr", "def status_request(dev, code, response_length, verbose=False):\n communicate(dev, a2b_hex('C' + code), a2b_hex('D' + code), verbose=verbose)\n response = dev.read(response_length)\n if verbose:\n print('<-', repr(response))\n return response", "def decode(self, code):\n raise NotImplementedError", "def response_description(self):\n return self._response_description", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def code(self) -> Optional[str]:\n return pulumi.get(self, \"code\")", "def _parse_code_desc(code_desc):\n close_index = code_desc.find(']')\n return code_desc[1:close_index]", "def get_description(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetDescription', self.handle)" ]
[ "0.7868941", "0.7265099", "0.71852154", "0.7024114", "0.68930936", "0.65707564", "0.6471099", "0.61716706", "0.6068444", "0.60453534", "0.5955093", "0.59224325", "0.59201384", "0.5877441", "0.5869272", "0.5843591", "0.584061", "0.58223593", "0.58223593", "0.58223593", "0.5772007", "0.5764537", "0.576136", "0.5755891", "0.5735942", "0.5735942", "0.5735942", "0.5735942", "0.5713406", "0.56947994" ]
0.7308555
1
Enables debug information for the license system. putlicensedebug(self,licdebug_)
def putlicensedebug(self,licdebug_): res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def set_debug(self, debug):\n self.debug = debug", "def debug(self, debug):\n\n self._debug = debug", "def setDebug():\n\tglobal debug\n\tdebug = True", "def setdebug(self):\n self.debug = True\n irclib.DEBUG = True\n print \"Debug mode on\"", "def _set_debug(debug):\n global _DEBUG\n _DEBUG = debug\n if debug:\n logging.disable(logging.NOTSET)\n else:\n logging.disable(logging.DEBUG)", "def set_debug(self, debug):\n self._debug = debug\n return self", "def set_debug(self, value=True):\n self.debug = value", "def set_debug(debug_val):\n global _DEBUG # noqa: PLW0603\n _DEBUG = debug_val", "def set_debug(self, debug):\n self.debug_flag = debug\n self.debug_log(\"%s: debug = %s\" % (self.__class__.__name__,\n self.debug_flag),\n flag=True)\n return debug", "def debug_mode(self, debug_mode):\n\n self._debug_mode = debug_mode", "def toggle_debug(self):\n self.__debug = not self.__debug", "def license(self, license):\n\n self._license = license", "def set_debug(state):\n global _DEBUG\n _DEBUG = bool(state)", "def add_license(fitsfile, lic):\n try:\n hdulist = pyfits.open(fitsfile, mode=\"update\")\n except:\n print(\"Oops! Something's gone wrong :-(\", file=sys.stderr)\n else:\n prihdr = hdulist[0].header\n prihdr[\"LICENSE\"] = liclist[lic][\"name\"]\n prihdr[\"LICVER\"] = liclist[lic][\"ver\"]\n prihdr[\"LICURL\"] = liclist[lic][\"url\"]\n add_comments(prihdr)\n hdulist.close()", "def catalogSetDebug(level):\n ret = libxml2mod.xmlCatalogSetDebug(level)\n return ret", "def set_debug(flag):\n global debug\n debug = flag\n XLM.XLM_Object.debug = flag\n XLM.xlm_library.debug = flag\n XLM.ms_stack_transformer.debug = flag\n XLM.stack_transformer.debug = flag\n XLM.excel2007.debug = flag", "def set_debug_mode(self, value):\n self.debug = value", "def set_debug_mode(debug_bool):\n\n MKL.MKL_DEBUG = debug_bool", "def set_debug_mode(self):\n self.debug_mode = True", "def test_debug(self, test_debug: Debug):\n\n self._test_debug = test_debug", "def set_debug(self):\n self.logger.setLevel(5)\n if self.uses_adc:\n self.adc.logger.setLevel(5)", "def _enableDebugPrint(self):\n self._dbPrint = Printer(debugPrint=True)", "def SetDebugMode(enabled=True):\n global option\n option['debug_mode'] = enabled", "def _debug():\n return _DEBUG", "def set_license_analytics(self, license_params: dict) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.OPT_IN,\n body=license_params,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def set_snippet_lics_info(self, doc, lics_info):\n self.assert_snippet_exists()\n if validations.validate_snip_lics_info(lics_info):\n doc.snippet[-1].add_lics(lics_info)\n return True\n else:\n raise SPDXValueError('Snippet::LicenseInfoInSnippet')", "def debug():", "def DEBUG(self, _strDebugMessage=\"\"):\n self.edLogging.DEBUG(_strDebugMessage)", "def install_debuginfo(self) -> None:\n pass" ]
[ "0.89798224", "0.6667688", "0.6555875", "0.6333619", "0.6249309", "0.62011623", "0.610661", "0.60838914", "0.6049035", "0.59686905", "0.59017515", "0.58994114", "0.58361554", "0.57802033", "0.5760369", "0.5759169", "0.5745018", "0.5731944", "0.5715388", "0.57055396", "0.5621381", "0.55910665", "0.55751675", "0.55656654", "0.5514362", "0.5414374", "0.540324", "0.53925776", "0.5373909", "0.5372722" ]
0.871731
1
Input a runtime license code. putlicensecode(self,code_)
def putlicensecode(self,code_): _code_minlength = value.license_buffer_length if value.license_buffer_length > 0 and code_ is not None and len(code_) != value.license_buffer_length: raise ValueError("Array argument code is not long enough: Is %d, expected %d" % (len(code_),value.license_buffer_length)) if isinstance(code_, numpy.ndarray) and code_.dtype is numpy.dtype(numpy.int32) and code_.flags.contiguous: _code_copyarray = False _code_tmp = ctypes.cast(code_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif code_ is not None: _code_copyarray = True _code_np_tmp = numpy.zeros(len(code_),numpy.dtype(numpy.int32)) _code_np_tmp[:] = code_ assert _code_np_tmp.flags.contiguous _code_tmp = ctypes.cast(_code_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _code_copyarray = False _code_tmp = None res = __library__.MSK_XX_putlicensecode(self.__nativep,_code_tmp) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putlicensecode(self,code): # 3\n if code is None:\n code_ = None\n else:\n try:\n code_ = memoryview(code)\n except TypeError:\n try:\n _tmparr_code = array.array(\"i\",code)\n except TypeError:\n raise TypeError(\"Argument code has wrong type\")\n else:\n code_ = memoryview(_tmparr_code)\n \n else:\n if code_.format != \"i\":\n code_ = memoryview(array.array(\"i\",code))\n \n if code_ is not None and len(code_) != value.license_buffer_length:\n raise ValueError(\"Array argument code has wrong length\")\n res = self.__obj.putlicensecode(code_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def set_license(self, license_code: str) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.LICENSE,\n body=license_code,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def code(self, code):\n\n self._code = code", "def code(self, code: str):\n\n self._code = code", "def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1", "def putlicensedebug(self,licdebug_):\n res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def add_code(self, code):\n self.code += code", "def update_code(self, new_code):\n self.code = new_code\n\n # Fill in the rest", "def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def update_code(self, new_code):\n\n self.code = new_code", "def update_code(self, new_code):\n\n self.code = new_code", "def code(self, code: int):\n\n self._code = code", "def code(self, code):\n if self.__code_is_set:\n raise MemoryPermissionsError(\"The code section can only be initialized and not written\")\n\n self.__code_is_set = True\n self.__code = code", "def update_code(self, new_code):\n\n new_code = self.code", "def putlicensepath(self,licensepath_):\n if isinstance(licensepath_,unicode):\n licensepath_ = licensepath_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putlicensepath(self.__nativep,licensepath_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def code(self, code):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n\n self._code = code", "def set_code(self, code):\n self.set_payload(code)", "def version_code(self, version_code):\n\n self._version_code = version_code", "def __init__(__self__, *,\n code: Optional[pulumi.Input[Union[str, 'Code']]] = None):\n if code is not None:\n pulumi.set(__self__, \"code\", code)", "def code(self, value: str) -> None:\n self._code = value", "def update_code(self):\n print ('update code')\n self.query_dict.update({'code':code.value})", "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def code():", "def code(self, code: \"str\"):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n self._attrs[\"code\"] = code", "def licence_code(self):\r\n return get_licence_code(self.key2, self.pre_code)", "def register_code(id, code):\n #print \"Adding %s to the registry\" % id\n #print code\n if _theRegistry.has_id(id):\n raise ValueError, 'key %s is already registerd' % id\n _theRegistry.add_code( id, 
code)", "def add_code(self, id, code):\n self.codes[id] = code", "def send_code(self, code: str) -> Dict:\n raise NotImplementedError" ]
[ "0.8494057", "0.69254506", "0.6896676", "0.6758022", "0.66151047", "0.6599839", "0.6433473", "0.6400917", "0.6400902", "0.6374781", "0.6374781", "0.6374159", "0.6374159", "0.6353943", "0.634261", "0.6289735", "0.6273301", "0.62477654", "0.6194605", "0.6178612", "0.61690074", "0.61590844", "0.61014134", "0.6051108", "0.60166866", "0.59546906", "0.5951392", "0.59037495", "0.5813534", "0.5797521" ]
0.8169754
1
Control whether mosek should wait for an available license if no license is available. putlicensewait(self,licwait_)
def putlicensewait(self,licwait_): res = __library__.MSK_XX_putlicensewait(self.__nativep,licwait_) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putlicensewait(self,licwait_): # 3\n res = self.__obj.putlicensewait(licwait_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def license(self, license):\n\n self._license = license", "def accept_license():\r\n msg, status = \"\", True\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'Click on license accept button'\r\n flag1 = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n \r\n \r\n\r\n status = False if not (flag1) else True\r\n else:\r\n \r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def activate_license(self):\n response = self.client.activate_license()\n if str(response[\"result\"][\"code\"]) == \"0\" and str(response[\"data\"][\"LicenseActiveResult\"]) == \"0\":\n self.module.exit_json(msg=\"Activate license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Activate license file fail.{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def set_concluded_license(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_conc_lics_set:\n self.file_conc_lics_set = True\n if validations.validate_lics_conc(lic):\n self.file(doc).conc_lics = lic\n return True\n else:\n raise SPDXValueError('File::ConcludedLicense')\n else:\n raise CardinalityError('File::ConcludedLicense')\n else:\n raise OrderError('File::ConcludedLicense')", "def putlicensedebug(self,licdebug_):\n res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def update_license(self, sKey, sUser, sCompany):\n\t\treturn Job(SDK.PrlSrv_UpdateLicense(self.handle, sKey, sUser, sCompany)[0])", "def set_pkg_licenses_concluded(self, doc, licenses):\n self.assert_package_exists()\n if not self.package_conc_lics_set:\n self.package_conc_lics_set = True\n if validations.validate_lics_conc(licenses):\n doc.package.conc_lics = licenses\n return True\n else:\n raise SPDXValueError('Package::ConcludedLicenses')\n else:\n raise CardinalityError('Package::ConcludedLicenses')", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n 
story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def set_pkg_license_declared(self, doc, lic):\n self.assert_package_exists()\n if not self.package_license_declared_set:\n self.package_license_declared_set = True\n if validations.validate_lics_conc(lic):\n doc.package.license_declared = lic\n return True\n else:\n raise SPDXValueError('Package::LicenseDeclared')\n else:\n raise CardinalityError('Package::LicenseDeclared')", "def checkoutlicense(self,feature_):\n res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def set_pkg_license_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_license_comment_set:\n self.package_license_comment_set = True\n if validations.validate_pkg_lics_comment(text):\n doc.package.license_comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('Package::LicenseComment')\n else:\n raise CardinalityError('Package::LicenseComment')", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? (y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def checkinlicense(self,feature_):\n res = __library__.MSK_XX_checkinlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def test_set_asset_license_connected(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n story.save()\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def erase_licenses(self):\n res = self._dll.JLINK_EMU_EraseLicenses()\n return (res == 0)", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def isLicensed(self):\r\n\t\treturn True", "def isLicensed(self):\r\n\t\treturn True", "def isLicensed(self):\r\n\t\treturn True", "def isLicensed(self):\r\n\t\treturn True", "def isLicensed(self):\r\n\t\treturn True", "def isLicensed(self):\r\n\t\treturn True", "def license_date(self, license_date):\n\n self._license_date = license_date", "def demomode_accept_license():\r\n msg, status = \"\", True\r\n# import genericfunctions\r\n# 
genericfunctions.accept_license_function()\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n\r\n # agrment_lbl = ui_controls.text_view(get_obj_identifier('EUL_agrement_labl'))\r\n #if agrment_lbl.strip() =='End User License Agreement': \r\n #print \"End user License Agreement label is displaying properly\" \r\n #else:\r\n # print \"End user License Agreement label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('EUL_agrement_labl','End User License Agreement')\r\n sleep(4) \r\n \r\n\r\n #'get the text view of the Eula acknowledge agreement text'\r\n #Agrement_text_view = ui_controls.text_view(get_obj_identifier('EULA_acknowledge_agrmrnt_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_acknowldge_agrement_txt)\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Agrement_text_view.strip():\r\n #print \"DemoMode Eula agreement acknowledgement report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Eula agreement acknowledgement report is not verified sucessfully\" \r\n \r\n 'verify Eula acknowledge agreement text'\r\n flag2,msg = element_textvalidation('EULA_acknowledge_agrmrnt_text',text_to_verify)\r\n sleep(4) \r\n 'click on eula full view element' \r\n flag3 = ui_controls.Click(get_obj_identifier('EULA_full_view')) \r\n #'get the text view of the Eula whole agreement text'\r\n #Eula_text_view = ui_controls.text_view(get_obj_identifier('EULAagrement_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_EULA_agrement_txt)\r\n\r\n # if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n # if text_to_verify.strip() == Eula_text_view.strip():\r\n #print \"DemoMode Eula agreement report verified sucessfully\"\r\n #else:\r\n # print \"DemoMode Eula agreement device report verified unsucessfully\" \r\n 'verify Eula acknowledge agreement text'\r\n flag3,msg = element_textvalidation('EULAagrement_text',text_to_verify)\r\n sleep(4) \r\n\r\n 'Click on license accept button'\r\n flag4 = ui_controls.button(get_obj_identifier('agree'))\r\n 'verify diagnostics and usage label'\r\n #diagnotsic_usage_lbl = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_lbl'))\r\n #if diagnotsic_usage_lbl.strip() =='Diagnostics and Usage': \r\n #print \"Diagnostics and Usage label is displaying properly\" \r\n #else:\r\n #print \"Diagnostics and Usage label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag5,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n\r\n ''\r\n # Diagnostic_usge_txt_view = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_txt'))\r\n #if not Diagnostic_usge_txt_view:\r\n #print \"Unable to retrieve text of diagnostics and usage text from application\"\r\n # return False, msg\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode diagnostics and usage text file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Diagnostic_usge_txt_view .strip():\r\n # print \"DemoMode Diagnostics and Usage report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Diagnostics and Usage report 
verified unsucessfully\" \r\n \r\n 'verify end user license agreement label'\r\n flag6,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n sleep(4) \r\n flag7 = ui_controls.button(get_obj_identifier('agree'))\r\n\r\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag5 and flag6 and flag7) else True\r\n else:\r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('a'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def isLicensed(self):\n\t\treturn True" ]
[ "0.8608505", "0.61256635", "0.6123673", "0.5970834", "0.5939061", "0.58548045", "0.5833694", "0.5822022", "0.57427907", "0.5724147", "0.56733525", "0.5650425", "0.5636824", "0.5599726", "0.5547277", "0.5536273", "0.5516888", "0.5485797", "0.5453408", "0.5441463", "0.5418717", "0.5394528", "0.5394528", "0.5394528", "0.5394528", "0.5394528", "0.5394528", "0.53755194", "0.53587407", "0.5340786" ]
0.84320354
1
Set the path to the license file. putlicensepath(self,licensepath_)
def putlicensepath(self,licensepath_): if isinstance(licensepath_,unicode): licensepath_ = licensepath_.encode("utf-8",errors="replace") res = __library__.MSK_XX_putlicensepath(self.__nativep,licensepath_) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def license(self, license):\n\n self._license = license", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def setLicenseKey(self,content):\n self.PDFreactorConfiguration.in1[\"licenseKey\"] = content", "def set_file_license_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_license_comment_set:\n self.file_license_comment_set = True\n if validations.validate_file_lics_comment(text):\n self.file(doc).license_comment = str_from_text(text)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')", "def put(self, license_handler):\n\n full_license = request.data\n return license_handler.upload_license(full_license)", "def setWriteFilePath(self, file_path):\n self.file_path = file_path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n self._path = path", "def register_license(file_path):\n result = mjlib.mj_activate(file_path)\n return result", "def create_license(self) -> None:\n # copy the license file from the template to the package folder\n # option : append other license files\n shutil.copy(CONFIG.template_path / \"LICENSE.md\", self.package_path)", "def license_date(self, license_date):\n\n self._license_date = license_date", "def set_license(self, license_code: str) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.LICENSE,\n body=license_code,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def setPath(self, path):\n if self._path != path:\n self._path = path\n self.__update_preview()", "def SetFileName(self, path):\n self.file.SetPath(path)", "def license_number(self, license_number):\n\n self._license_number = license_number", "def set_file_license_in_file(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if validations.validate_file_lics_in_file(lic):\n self.file(doc).add_lics(lic)\n return True\n else:\n raise SPDXValueError('File::LicenseInFile')\n else:\n raise OrderError('File::LicenseInFile')", "def license_model_description(self, license_model_description):\n self._license_model_description = license_model_description", "def set_pkg_license_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_license_comment_set:\n self.package_license_comment_set = True\n if validations.validate_pkg_lics_comment(text):\n 
doc.package.license_comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('Package::LicenseComment')\n else:\n raise CardinalityError('Package::LicenseComment')", "def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def set_output_path(self, path, timestamp=True):\n self.ui.lineEdit_output_path.setText(path)\n self.ui.checkBox_timestamp.setChecked(timestamp)", "def path(self, new_path: str):\n if os.path.exists(new_path):\n self._path = new_path\n # Call this here because it'll replace any existing arf and rmf file paths with the ones\n # currently loaded in the instance of this object.\n self._update_spec_headers(\"main\")\n else:\n raise FileNotFoundError(\"The new spectrum file does not exist\")", "def _setPath(self, path):\n self.path = os.path.abspath(path)\n\n print('path = ' + path)\n try:\n os.chdir(self.path)\n except OSError as exc:\n LOGGER.error('Path doesn''t exist: %s' % (path))\n LOGGER.exception(exc)\n raise (exc)\n\n # check for path in the new Radiance directory:\n def _checkPath(path): # create the file structure if it doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n print('Making path: '+path)", "def setPath(self, path):\n libxml2mod.xmlURISetPath(self._o, path)" ]
[ "0.88523585", "0.71379733", "0.6935849", "0.67722934", "0.67470366", "0.6633295", "0.63802004", "0.6364983", "0.6364983", "0.6364983", "0.6364983", "0.6364983", "0.6356385", "0.6281657", "0.62535214", "0.62287545", "0.6200384", "0.61479205", "0.6000918", "0.59174573", "0.59096855", "0.5893502", "0.5891376", "0.5882604", "0.58754367", "0.5852348", "0.584785", "0.5845948", "0.58431655", "0.5833728" ]
0.8595367
1
Computes vector addition and multiplication by a scalar. axpy(self,n_,alpha_,x_,y_)
def axpy(self,n_,alpha_,x_,y_): _x_minlength = (n_) if (n_) > 0 and x_ is not None and len(x_) != (n_): raise ValueError("Array argument x is not long enough: Is %d, expected %d" % (len(x_),(n_))) if x_ is None: raise ValueError("Argument x cannot be None") if x_ is None: raise ValueError("Argument x may not be None") if isinstance(x_, numpy.ndarray) and x_.dtype is numpy.dtype(numpy.float64) and x_.flags.contiguous: _x_copyarray = False _x_tmp = ctypes.cast(x_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif x_ is not None: _x_copyarray = True _x_np_tmp = numpy.zeros(len(x_),numpy.dtype(numpy.float64)) _x_np_tmp[:] = x_ assert _x_np_tmp.flags.contiguous _x_tmp = ctypes.cast(_x_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _x_copyarray = False _x_tmp = None _y_minlength = (n_) if (n_) > 0 and y_ is not None and len(y_) != (n_): raise ValueError("Array argument y is not long enough: Is %d, expected %d" % (len(y_),(n_))) if isinstance(y_,numpy.ndarray) and not y_.flags.writeable: raise ValueError("Argument y must be writable") if y_ is None: raise ValueError("Argument y may not be None") if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous: _y_copyarray = False _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif y_ is not None: _y_copyarray = True _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64)) _y_np_tmp[:] = y_ assert _y_np_tmp.flags.contiguous _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _y_copyarray = False _y_tmp = None res = __library__.MSK_XX_axpy(self.__nativep,n_,alpha_,_x_tmp,_y_tmp) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) if _y_copyarray: y_[:] = _y_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def axpy(self,n_,alpha_,x,y): # 3\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if x_ is not None and len(x_) != (n_):\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != (n_):\n raise ValueError(\"Array argument y has wrong length\")\n res = self.__obj.axpy(n_,alpha_,x_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_y:\n y[:] = _tmparr_y", "def t_nAx_(mt, x, n, defer=0, i=None, g=.0, method='udd'):\n return t_nAx(mt=mt, x=x, n=n, defer=defer, i=i, g=g, method=method) * np.sqrt(1 + i / 100)", "def axpby(alpha,pepx1,beta,pepx2):\n\n pepx_new = add(mul(alpha,pepx1),mul(beta,pepx))\n return pepx_new", "def __add__(self, i):\n self.n += i\n plt.subplot(self.nx, self.ny, self.n)\n return True", "def vector_space(a, alpha):\n x, y = meshgrid(linspace(-2, 2, num=20), linspace(-2, 2, num=20))\n fx, fy = stuartLandau([x, y], a, alpha)\n gx, gy = noiseFunction([x, y])\n plt.quiver(x, y, fx + gx, fy + gy, color='red')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()", "def axes(self,a_len,b_len,c_len,beta):\n self.a = np.array([0,0,a_len])\n self.b = np.array([0,b_len,0])\n self.c = Ry(-beta) @ np.array([0,0,c_len])", "def _plot_alpha2(a_list,ax):\n cm = plots.color_mapper(27,33)\n if ax is None:\n ax = plots.set_up_axis(r'$\\Delta \\tau$ [ms]',r'$\\alpha_2$','')\n\n for a,temp in a_list:\n dt,a2 = zip(*a)\n ax.step(dt,a2,\n label='%.2f'%temp,\n color=cm.get_color(temp),\n where='post')", "def __mul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def __iadd__(self, func):\n self.append_plot(func)\n return self", "def t_Ax_(mt, x, defer=0, i=None, g=.0, method='udd'):\n return t_Ax(mt=mt, x=x, defer=defer, i=i, g=g, method=method) * np.sqrt(1 + i / 100)", "def _alpha(m, d, Q):\n\n if d % 2 == 1:\n w1 = np.array([0, 1]) # t\n else:\n w1 = np.array([1]) # 1\n mat_y = _lambda(m, d + 1 - len(w1), Q)\n return _mult_poly_matrix_poly(w1, mat_y)", "def nalphas(self):\n return sum(self.alpha)", "def t_nAx(mt, x, n, defer=0, i=None, g=.0, method='udd'):\n return A_x(mt=mt, x=x, x_first=x + 1 + defer, x_last=x + n + defer, i=i, g=g, method=method)", "def scalar_vector_ext(alpha, v, a, b):\n return [alpha * v[0],\n alpha * v[0] * a + b]", "def _parameter_dot_product(x: JaxComplexArray, y: JaxComplexArray, n_axes: int) -> JaxRealArray:\n axes = tuple(range(-n_axes, 0))\n return jnp.sum(x * y, axis=axes).real", "def cb_plus(event):\n delta_alpha = pm_rate\n # Increase Alpha \n sAlpha0.set_val( np.clip(sAlpha0.val + delta_alpha, alpha_min[0], alpha_max[0]) )\n sAlpha1.set_val( np.clip(sAlpha1.val + delta_alpha, alpha_min[1], alpha_max[1]) )\n sAlpha2.set_val( np.clip(sAlpha2.val + delta_alpha, alpha_min[2], alpha_max[2]) )\n print(\"+++\")", "def __imul__(self, n):\n vectors = [n * Vector(*(p - 
self.center)) for p in self.points]\n self.points = [vectors[i](self.points[i]) for i in range(len(self.points))]\n return self", "def ext_mul(self, n: int, a: 'PFElement') -> 'PFElement':\n return self(self._pf_ext_mul(n, a.value, self.additive_group))", "def articulate(self, ar_in):\n #pdb.set_trace()\n ar_out = ar_in * 2 - 1\n ar_out[:, 0:2] *= N.random.beta(self.alpha, self.beta, (4, 2))\n #ar_out[:,0:2] += N.random.normal(0,0.001)\n ar_out = 0.5 * ar_out + 0.5\n return ar_out", "def alpha(self):\n sinOmg = np.sin(self.omega())\n return self.a1()/c.c*sinOmg", "def get_alpha_beta(self,n=50):\n return self.tau(self.f0(self.rho),n),self.tau_plus(self.f1(self.rho),n)", "def __mul__(self, n):\n vectors = [n * Vector(*(p - self.center)) for p in self.points]\n return Form([vectors[i](self.points[i]) for i in range(len(self.points))])", "def alpha(self, x):\n alpha = [0] * len(self.A)\n for i in xrange(len(self.A)):\n alpha[i] = self.A[i].dot(x) / self.mu\n\n # Apply projection\n alpha = self.project(alpha)\n\n return alpha", "def __iadd__(self, n):\n return _elas.SwigPyIterator___iadd__(self, n)", "def __add__(self, n):\n return _elas.SwigPyIterator___add__(self, n)", "def __init__(self, alpha, n):\n self.alpha = alpha\n self.n = n\n self.vs = []", "def Draw(Uk): \n vecx = np.zeros([n,1])\n for i in range(n):\n vecx[i][0] =(float(2*i-n+1)/(n-1))*L\n plt.plot(vecx, Uk, linewidth=1.0)\n plt.show()", "def evalComponent(self, x, p):\n if p > 0 and p <= self.n:\n p = str(p)\n y = self[\"off\"] + self[\"lin\"] * x\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y\n else:\n raise(PE.PyAValError(\"No such component (no. \" + str(p) + \")\", where=\"MultiVoigt1d::evalComponent\",\n solution=\"Use value between 1 and \" + str(self.n)))", "def apply_eqn(eqn, x):\n return eqn[0] * x + eqn[1]", "def NACA4digitsSym(self):\n self.ytu = self.NacaEquation(self.xu,self.t)\n self.ytl = -self.NacaEquation(self.xl,self.t)\n # Done for estitic reasons\n self.yu = self.ytu \n self.yl = self.ytl\n self.z = np.concatenate((self.yu, np.flip(self.yl)))\n if self.plot:\n plt.figure(self.name)\n plt.title(self.name)\n plt.plot(self.xu,self.yu)\n plt.plot(self.xl,self.yl)\n plt.axis('equal')" ]
[ "0.74733484", "0.60606563", "0.60547745", "0.5907904", "0.5887075", "0.58122003", "0.57062954", "0.55805856", "0.5475544", "0.5468158", "0.54651934", "0.54209197", "0.541921", "0.5392053", "0.5377396", "0.5374282", "0.5341102", "0.5317172", "0.5305736", "0.52814037", "0.5274094", "0.5242216", "0.5234424", "0.5219142", "0.51856047", "0.5184557", "0.51747626", "0.51520306", "0.5144937", "0.5113988" ]
0.7002267
1
Computes dense matrix times a dense vector product. gemv(self,transa_,m_,n_,alpha_,a_,x_,beta_,y_)
def gemv(self,transa_,m_,n_,alpha_,a_,x_,beta_,y_): _a_minlength = ((n_) * (m_)) if ((n_) * (m_)) > 0 and a_ is not None and len(a_) != ((n_) * (m_)): raise ValueError("Array argument a is not long enough: Is %d, expected %d" % (len(a_),((n_) * (m_)))) if a_ is None: raise ValueError("Argument a cannot be None") if a_ is None: raise ValueError("Argument a may not be None") if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous: _a_copyarray = False _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif a_ is not None: _a_copyarray = True _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64)) _a_np_tmp[:] = a_ assert _a_np_tmp.flags.contiguous _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _a_copyarray = False _a_tmp = None if ((transa_) == transpose.no): __tmp_var_0 = (n_); else: __tmp_var_0 = (m_); _x_minlength = __tmp_var_0 if __tmp_var_0 > 0 and x_ is not None and len(x_) != __tmp_var_0: raise ValueError("Array argument x is not long enough: Is %d, expected %d" % (len(x_),__tmp_var_0)) if x_ is None: raise ValueError("Argument x cannot be None") if x_ is None: raise ValueError("Argument x may not be None") if isinstance(x_, numpy.ndarray) and x_.dtype is numpy.dtype(numpy.float64) and x_.flags.contiguous: _x_copyarray = False _x_tmp = ctypes.cast(x_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif x_ is not None: _x_copyarray = True _x_np_tmp = numpy.zeros(len(x_),numpy.dtype(numpy.float64)) _x_np_tmp[:] = x_ assert _x_np_tmp.flags.contiguous _x_tmp = ctypes.cast(_x_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _x_copyarray = False _x_tmp = None if ((transa_) == transpose.no): __tmp_var_1 = (m_); else: __tmp_var_1 = (n_); _y_minlength = __tmp_var_1 if __tmp_var_1 > 0 and y_ is not None and len(y_) != __tmp_var_1: raise ValueError("Array argument y is not long enough: Is %d, expected %d" % (len(y_),__tmp_var_1)) if isinstance(y_,numpy.ndarray) and not y_.flags.writeable: raise ValueError("Argument y must be writable") if y_ is None: raise ValueError("Argument y may not be None") if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous: _y_copyarray = False _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif y_ is not None: _y_copyarray = True _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64)) _y_np_tmp[:] = y_ assert _y_np_tmp.flags.contiguous _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _y_copyarray = False _y_tmp = None res = __library__.MSK_XX_gemv(self.__nativep,transa_,m_,n_,alpha_,_a_tmp,_x_tmp,beta_,_y_tmp) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) if _y_copyarray: y_[:] = _y_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gemv(self,transa_,m_,n_,alpha_,a,x,beta_,y): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a has wrong length\")\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n if x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n if y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y has wrong length\")\n res = self.__obj.gemv(transa_,m_,n_,alpha_,a_,x_,beta_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_y:\n y[:] = _tmparr_y", "def magma_sgemv(trans, m, n, alpha, dA, ldda, dx, incx, beta,\n dy, incy, queue):\n\n _libmagma.magma_sgemv(trans, m, n, alpha, int(dA), ldda, dx, incx,\n beta, int(dy), incy, queue)", "def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a,b,beta_,c): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if not isinstance(transb_,transpose): raise TypeError(\"Argument transb has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n \n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n \n if b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b has wrong length\")\n if c is None: raise TypeError(\"Invalid type for argument c\")\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n 
try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.gemm(transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_c:\n c[:] = _tmparr_c", "def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_):\n _a_minlength = ((m_) * (k_))\n if ((m_) * (k_)) > 0 and a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((m_) * (k_))))\n if a_ is None:\n raise ValueError(\"Argument a cannot be None\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n _b_minlength = ((k_) * (n_))\n if ((k_) * (n_)) > 0 and b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b is not long enough: Is %d, expected %d\" % (len(b_),((k_) * (n_))))\n if b_ is None:\n raise ValueError(\"Argument b cannot be None\")\n if b_ is None:\n raise ValueError(\"Argument b may not be None\")\n if isinstance(b_, numpy.ndarray) and b_.dtype is numpy.dtype(numpy.float64) and b_.flags.contiguous:\n _b_copyarray = False\n _b_tmp = ctypes.cast(b_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif b_ is not None:\n _b_copyarray = True\n _b_np_tmp = numpy.zeros(len(b_),numpy.dtype(numpy.float64))\n _b_np_tmp[:] = b_\n assert _b_np_tmp.flags.contiguous\n _b_tmp = ctypes.cast(_b_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _b_copyarray = False\n _b_tmp = None\n \n _c_minlength = ((m_) * (n_))\n if ((m_) * (n_)) > 0 and c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c is not long enough: Is %d, expected %d\" % (len(c_),((m_) * (n_))))\n if isinstance(c_,numpy.ndarray) and not c_.flags.writeable:\n raise ValueError(\"Argument c must be writable\")\n if c_ is None:\n raise ValueError(\"Argument c may not be None\")\n if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:\n _c_copyarray = False\n _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif c_ is not None:\n _c_copyarray = True\n _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))\n _c_np_tmp[:] = c_\n assert _c_np_tmp.flags.contiguous\n _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _c_copyarray = False\n _c_tmp = None\n \n res = __library__.MSK_XX_gemm(self.__nativep,transa_,transb_,m_,n_,k_,alpha_,_a_tmp,_b_tmp,beta_,_c_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _c_copyarray:\n c_[:] = _c_np_tmp", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, 
v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def test_gemm_opt0():\r\n X, Y, Z, a, b = XYZab()\r\n\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])\r\n\r\n #with transposes (transposes should be pushed through dot in canonicalize)\r\n just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])\r\n just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],\r\n ishapes=[(5, 3), (3, 4), (4, 5), (), ()])\r\n\r\n #with N multiplications instead of just one\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])", "def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n 
inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]", "def csrgemm_ez(self, matA, matB, transA='N', transB='N', descrA=None,\n descrB=None, descrC=None):\n tmpdescr = self.matdescr()\n descrA = descrA or tmpdescr\n descrB = descrB or tmpdescr\n descrC = descrC or tmpdescr\n\n dtype = matA.dtype\n m, ka = matA.shape\n kb, n = matB.shape\n if ka != kb:\n raise ValueError(\"incompatible matrices\")\n k = ka\n\n indptrC = cuda.device_array(m + 1, dtype='int32')\n nnz = self.XcsrgemmNnz(transA, transB, m, n, k, descrA, matA.nnz,\n matA.indptr, matA.indices, descrB, matB.nnz,\n matB.indptr, matB.indices, descrC, indptrC)\n\n if nnz == 0:\n raise ValueError(\"result is entirely zero\")\n\n dataC = cuda.device_array(nnz, dtype=dtype)\n indicesC = cuda.device_array(nnz, dtype='int32')\n self.csrgemm(transA, transB, m, n, k, descrA, matA.nnz, matA.data,\n matA.indptr, matA.indices, descrB, matB.nnz, matB.data,\n matB.indptr, matB.indices, descrC, dataC, indptrC,\n indicesC)\n\n return CudaCSRMatrix().from_attributes(data=dataC, indices=indicesC,\n indptr=indptrC, shape=(m, n),\n dtype=dtype, nnz=nnz)", "def _fix_gemm(self, op_name, inputs, old_attr):\n op = getattr(mx.sym, op_name, None)\n alpha = float(old_attr.get('alpha', 1.0))\n beta = float(old_attr.get('beta', 1.0))\n transA = int(old_attr.get('transA', 0))\n transB = int(old_attr.get('transB', 0))\n if transA:\n inputs[0] = mx.sym.transpose(inputs[0], axes=(1, 0))\n if not transB:\n inputs[1] = mx.sym.transpose(inputs[1], axes=(1, 0))\n new_inputs = [alpha*inputs[0], inputs[1], beta*inputs[2]]\n new_attr = {'num_hidden' : self._params[inputs[2].name].shape[0]}\n return op, new_inputs, new_attr", "def SpMV_viaMKL( A, x ):\n SpMV = mkl.mkl_cspblas_dcsrgemv\n # Dissecting the \"cspblas_dcsrgemv\" name:\n # \"c\" - for \"c-blas\" like interface (as opposed to fortran)\n # Also means expects sparse arrays to use 0-based indexing, which python does\n # \"sp\" for sparse\n # \"d\" for double-precision\n # \"csr\" for compressed row format\n # \"ge\" for \"general\", e.g., the matrix has no special structure such as symmetry\n # \"mv\" for \"matrix-vector\" multiply\n\n if not sparse.isspmatrix_csr(A):\n raise Exception(\"Matrix must be in csr format\")\n (m,n) = A.shape\n\n # The data of the matrix\n data = A.data.ctypes.data_as(POINTER(c_double))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n nVectors = 1\n if x.ndim is 1:\n y = np.empty(m,dtype=np.double,order='F')\n if x.size != n:\n raise Exception(\"x must have n entries. x.size is %d, n is %d\" % (x.size,n))\n elif x.shape[1] is 1:\n y = np.empty((m,1),dtype=np.double,order='F')\n if x.shape[0] != n:\n raise Exception(\"x must have n entries. x.size is %d, n is %d\" % (x.size,n))\n else:\n nVectors = x.shape[1]\n y = np.empty((m,nVectors),dtype=np.double,order='F')\n if x.shape[0] != n:\n raise Exception(\"x must have n entries. 
x.size is %d, n is %d\" % (x.size,n))\n\n # Check input\n if x.dtype.type is not np.double:\n x = x.astype(np.double,copy=True)\n # Put it in column-major order, otherwise for nVectors > 1 this FAILS completely\n if x.flags['F_CONTIGUOUS'] is not True:\n x = x.copy(order='F')\n\n if nVectors == 1:\n np_x = x.ctypes.data_as(POINTER(c_double))\n np_y = y.ctypes.data_as(POINTER(c_double))\n # now call MKL. This returns the answer in np_y, which links to y\n SpMV(byref(c_char(b\"N\")), byref(c_int(m)),data ,indptr, indices, np_x, np_y ) \n else:\n for columns in range(nVectors):\n xx = x[:,columns]\n yy = y[:,columns]\n np_x = xx.ctypes.data_as(POINTER(c_double))\n np_y = yy.ctypes.data_as(POINTER(c_double))\n SpMV(byref(c_char(b\"N\")), byref(c_int(m)),data,indptr, indices, np_x, np_y ) \n\n return y", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Translate(*args, **kwargs)", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def affine_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat2(0)\r\n M2 = mat2(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def g_multivariate_normal(x,M):\n return .5*np.dot(x,M+M.T)", "def magma_zgels_gpu(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_zgels_gpu(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def forward(self, states):\n return np.matmul(states, self._M)", "def similarity_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n mu = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n mu += w[i]*(p_adj.dot(p_adj))\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj/mu\r\n v_out += q_wgt\r\n return v_out", "def local_gemm_to_gemv(node):\r\n if node.op == gemm_no_inplace:\r\n z, a, x, y, b = node.inputs\r\n if z.broadcastable == x.broadcastable == (True, False):\r\n r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)\r\n return [r.dimshuffle('x', 0)]\r\n if z.broadcastable == y.broadcastable == (False, True):\r\n r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)\r\n return [r.dimshuffle(0, 'x')]", "def np_matmul(mat1, mat2):\n 
return mat1.dot(mat2)", "def matrix_mult_vec(matrix_a, x):\n m = len(matrix_a)\n b = [0 for i in xrange(m)]\n for i in xrange(m):\n b[i] = dot_product(matrix_a[i], x)\n return b", "def spmv (n, A, x):\n y = dense_vector (n)\n for (i, A_i) in A.items ():\n s = 0\n for (j, a_ij) in A_i.items ():\n s += a_ij * x[j]\n y[i] = s\n return y", "def matvec(self, x):\n return self * x", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def multMatVect(v, A, m1, B, m2):\r\n if multMatVect.dot_modulo is None:\r\n A_sym = tensor.lmatrix('A')\r\n s_sym = tensor.ivector('s')\r\n m_sym = tensor.iscalar('m')\r\n A2_sym = tensor.lmatrix('A2')\r\n s2_sym = tensor.ivector('s2')\r\n m2_sym = tensor.iscalar('m2')\r\n o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)\r\n multMatVect.dot_modulo = function(\r\n [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)\r\n\r\n # This way of calling the Theano fct is done to bypass Theano overhead.\r\n f = multMatVect.dot_modulo\r\n f.input_storage[0].storage[0] = A\r\n f.input_storage[1].storage[0] = v[:3]\r\n f.input_storage[2].storage[0] = m1\r\n f.input_storage[3].storage[0] = B\r\n f.input_storage[4].storage[0] = v[3:]\r\n f.input_storage[5].storage[0] = m2\r\n f.fn()\r\n r = f.output_storage[0].storage[0]\r\n\r\n return r", "def rigid_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj\r\n r = math.sqrt(v_out.dot(v_out))\r\n v_out /= r\r\n v_sub = v - p_wgt\r\n r = math.sqrt(v_sub.dot(v_sub))\r\n v_out *= r\r\n v_out += q_wgt\r\n return v_out", "def magma_sgetrf_m(ngpu,m, n, A, lda, ipiv):\n\n info = c_int_type()\n status = _libmagma.magma_sgetrf_m(ngpu,m, n, int(A), lda,\n int(ipiv), ctypes.byref(info))\n magmaCheckStatus(status)", "def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)", "def get_transform_matrix(gamma, a, epsilon=1e-8):\n return (np.diag(1.0 / (a + epsilon)) @ gamma).T", "def DerivMatrixExponential(dG, alpha, S, Sinv, D):\n (n1, n2) = dG.shape\n assert n1 == n2, \"dG is not a square matrix.\"\n n = n1\n assert S.shape == (n, n), 'S does not have the correct dimensions.'\n assert Sinv.shape == (n, n), 'S does not have the correct dimensions.'\n assert D.shape == (n, ), 'D does not have the correct dimensions.'\n assert isinstance(alpha, (int, float)) or alpha.shape == ()\n B = numpy.dot(numpy.dot(Sinv, dG), S)\n expalphaD = numpy.exp(alpha * D)\n V = numpy.ndarray((n, n))\n for x in range(n):\n for y in range(n):\n if x != y:\n V[x, y] = B[x, y] * (expalphaD[x] - expalphaD[y]) / (D[x] - D[y])\n else:\n V[x, y] = B[x, x] * alpha * expalphaD[x]\n return numpy.dot(numpy.dot(S, V), Sinv)", "def _create_gemm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n alpha = onnx_node.getattr('alpha', 1.)\n beta = onnx_node.getattr('beta', 1.)\n transA = onnx_node.getattr('transA', 0)\n transB = onnx_node.getattr('transB', 0)\n _, forward = 
cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(alpha=alpha,\n beta=beta,\n transA=transA,\n transB=transB)" ]
[ "0.79829377", "0.7744591", "0.6858341", "0.6395874", "0.6372895", "0.6168905", "0.5858304", "0.5748902", "0.5633987", "0.5538611", "0.5522281", "0.5460428", "0.54414135", "0.53847235", "0.5364882", "0.5344385", "0.53304935", "0.5325173", "0.5298962", "0.52883595", "0.5280534", "0.52719855", "0.5271697", "0.52580494", "0.5257961", "0.5254506", "0.5250269", "0.52498305", "0.5245721", "0.52442336" ]
0.792816
1
Performs a dense matrix multiplication. gemm(self,transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_)
def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_):
    _a_minlength = ((m_) * (k_))
    if ((m_) * (k_)) > 0 and a_ is not None and len(a_) != ((m_) * (k_)):
        raise ValueError("Array argument a is not long enough: Is %d, expected %d" % (len(a_),((m_) * (k_))))
    if a_ is None:
        raise ValueError("Argument a cannot be None")
    if a_ is None:
        raise ValueError("Argument a may not be None")
    if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:
        _a_copyarray = False
        _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif a_ is not None:
        _a_copyarray = True
        _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))
        _a_np_tmp[:] = a_
        assert _a_np_tmp.flags.contiguous
        _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _a_copyarray = False
        _a_tmp = None
    _b_minlength = ((k_) * (n_))
    if ((k_) * (n_)) > 0 and b_ is not None and len(b_) != ((k_) * (n_)):
        raise ValueError("Array argument b is not long enough: Is %d, expected %d" % (len(b_),((k_) * (n_))))
    if b_ is None:
        raise ValueError("Argument b cannot be None")
    if b_ is None:
        raise ValueError("Argument b may not be None")
    if isinstance(b_, numpy.ndarray) and b_.dtype is numpy.dtype(numpy.float64) and b_.flags.contiguous:
        _b_copyarray = False
        _b_tmp = ctypes.cast(b_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif b_ is not None:
        _b_copyarray = True
        _b_np_tmp = numpy.zeros(len(b_),numpy.dtype(numpy.float64))
        _b_np_tmp[:] = b_
        assert _b_np_tmp.flags.contiguous
        _b_tmp = ctypes.cast(_b_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _b_copyarray = False
        _b_tmp = None
    _c_minlength = ((m_) * (n_))
    if ((m_) * (n_)) > 0 and c_ is not None and len(c_) != ((m_) * (n_)):
        raise ValueError("Array argument c is not long enough: Is %d, expected %d" % (len(c_),((m_) * (n_))))
    if isinstance(c_,numpy.ndarray) and not c_.flags.writeable:
        raise ValueError("Argument c must be writable")
    if c_ is None:
        raise ValueError("Argument c may not be None")
    if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:
        _c_copyarray = False
        _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif c_ is not None:
        _c_copyarray = True
        _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))
        _c_np_tmp[:] = c_
        assert _c_np_tmp.flags.contiguous
        _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _c_copyarray = False
        _c_tmp = None
    res = __library__.MSK_XX_gemm(self.__nativep,transa_,transb_,m_,n_,k_,alpha_,_a_tmp,_b_tmp,beta_,_c_tmp)
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
    if _c_copyarray:
        c_[:] = _c_np_tmp
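For orientation, a minimal NumPy sketch of the BLAS-style update this kind of gemm wrapper exposes, C := alpha*op(A)@op(B) + beta*C on flat buffers. The row-major storage convention and the boolean transpose flags below are assumptions made purely for illustration; the wrapper above defers the actual computation to the native MSK_XX_gemm call.

import numpy as np

def gemm_reference(transa, transb, m, n, k, alpha, a, b, beta, c):
    # Illustrative only: c := alpha * op(A) @ op(B) + beta * C on flat,
    # row-major arrays with len(a) == m*k, len(b) == k*n, len(c) == m*n.
    A = np.asarray(a, dtype=float).reshape((k, m) if transa else (m, k))
    B = np.asarray(b, dtype=float).reshape((n, k) if transb else (k, n))
    opA = A.T if transa else A
    opB = B.T if transb else B
    C = np.asarray(c, dtype=float).reshape(m, n)
    return (alpha * (opA @ opB) + beta * C).ravel()

a = np.arange(6, dtype=float)      # 2x3 matrix, flattened row-major
b = np.ones(6)                     # 3x2 matrix, flattened row-major
c = np.zeros(4)                    # 2x2 result buffer
print(gemm_reference(False, False, 2, 2, 3, 1.0, a, b, 0.0, c))  # [ 3.  3. 12. 12.]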
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a,b,beta_,c): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if not isinstance(transb_,transpose): raise TypeError(\"Argument transb has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n \n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n \n if b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b has wrong length\")\n if c is None: raise TypeError(\"Invalid type for argument c\")\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.gemm(transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_c:\n c[:] = _tmparr_c", "def gemv(self,transa_,m_,n_,alpha_,a_,x_,beta_,y_):\n _a_minlength = ((n_) * (m_))\n if ((n_) * (m_)) > 0 and a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((n_) * (m_))))\n if a_ is None:\n raise ValueError(\"Argument a cannot be None\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n _x_minlength = __tmp_var_0\n if __tmp_var_0 > 0 and x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x is not long enough: Is %d, expected %d\" % (len(x_),__tmp_var_0))\n if x_ is None:\n raise ValueError(\"Argument x cannot be None\")\n if x_ is None:\n raise ValueError(\"Argument x may not be None\")\n if isinstance(x_, numpy.ndarray) and x_.dtype is numpy.dtype(numpy.float64) and x_.flags.contiguous:\n _x_copyarray = False\n _x_tmp = ctypes.cast(x_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif x_ is not None:\n _x_copyarray = True\n _x_np_tmp = 
numpy.zeros(len(x_),numpy.dtype(numpy.float64))\n _x_np_tmp[:] = x_\n assert _x_np_tmp.flags.contiguous\n _x_tmp = ctypes.cast(_x_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _x_copyarray = False\n _x_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n _y_minlength = __tmp_var_1\n if __tmp_var_1 > 0 and y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),__tmp_var_1))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if y_ is None:\n raise ValueError(\"Argument y may not be None\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n res = __library__.MSK_XX_gemv(self.__nativep,transa_,m_,n_,alpha_,_a_tmp,_x_tmp,beta_,_y_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _y_copyarray:\n y_[:] = _y_np_tmp", "def gemv(self,transa_,m_,n_,alpha_,a,x,beta_,y): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a has wrong length\")\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n if x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n if y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y has wrong length\")\n res = self.__obj.gemv(transa_,m_,n_,alpha_,a_,x_,beta_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_y:\n y[:] = _tmparr_y", "def csrgemm_ez(self, matA, matB, transA='N', transB='N', descrA=None,\n descrB=None, descrC=None):\n tmpdescr = self.matdescr()\n descrA = descrA or 
tmpdescr\n descrB = descrB or tmpdescr\n descrC = descrC or tmpdescr\n\n dtype = matA.dtype\n m, ka = matA.shape\n kb, n = matB.shape\n if ka != kb:\n raise ValueError(\"incompatible matrices\")\n k = ka\n\n indptrC = cuda.device_array(m + 1, dtype='int32')\n nnz = self.XcsrgemmNnz(transA, transB, m, n, k, descrA, matA.nnz,\n matA.indptr, matA.indices, descrB, matB.nnz,\n matB.indptr, matB.indices, descrC, indptrC)\n\n if nnz == 0:\n raise ValueError(\"result is entirely zero\")\n\n dataC = cuda.device_array(nnz, dtype=dtype)\n indicesC = cuda.device_array(nnz, dtype='int32')\n self.csrgemm(transA, transB, m, n, k, descrA, matA.nnz, matA.data,\n matA.indptr, matA.indices, descrB, matB.nnz, matB.data,\n matB.indptr, matB.indices, descrC, dataC, indptrC,\n indicesC)\n\n return CudaCSRMatrix().from_attributes(data=dataC, indices=indicesC,\n indptr=indptrC, shape=(m, n),\n dtype=dtype, nnz=nnz)", "def test_gemm_opt0():\r\n X, Y, Z, a, b = XYZab()\r\n\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])\r\n\r\n #with transposes (transposes should be pushed through dot in canonicalize)\r\n just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])\r\n just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],\r\n ishapes=[(5, 3), (3, 4), (4, 5), (), ()])\r\n\r\n #with N multiplications instead of just one\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])", "def _fix_gemm(self, op_name, inputs, old_attr):\n op = getattr(mx.sym, op_name, None)\n alpha = float(old_attr.get('alpha', 1.0))\n beta = float(old_attr.get('beta', 1.0))\n transA = int(old_attr.get('transA', 0))\n transB = int(old_attr.get('transB', 0))\n if transA:\n inputs[0] = mx.sym.transpose(inputs[0], axes=(1, 0))\n if not transB:\n inputs[1] = mx.sym.transpose(inputs[1], axes=(1, 0))\n new_inputs = [alpha*inputs[0], inputs[1], beta*inputs[2]]\n new_attr = {'num_hidden' : self._params[inputs[2].name].shape[0]}\n return op, new_inputs, new_attr", "def magma_sgemv(trans, m, n, alpha, dA, ldda, dx, incx, beta,\n dy, incy, queue):\n\n _libmagma.magma_sgemv(trans, m, n, alpha, int(dA), ldda, dx, incx,\n beta, int(dy), incy, queue)", "def _create_gemm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n alpha = onnx_node.getattr('alpha', 1.)\n beta = onnx_node.getattr('beta', 1.)\n transA = onnx_node.getattr('transA', 0)\n transB = onnx_node.getattr('transB', 0)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(alpha=alpha,\n beta=beta,\n transA=transA,\n transB=transB)", "def magma_zgels_gpu(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_zgels_gpu(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), 
lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def magma_cgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_cgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def _create_gemm(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', float(op.alpha)),\n helper.make_attribute('beta', float(op.beta)),\n helper.make_attribute('transA', op.transA),\n helper.make_attribute('transB', op.transB),\n ])\n\n return node", "def magma_cgels_gpu(trans, m, n, nrhs, A, lda, B, ldb, 
hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_cgels_gpu(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def magma_zgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_zgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. 
max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def Exp(A, B):\n return A.dot(expm(B))", "def make_gemm_pattern(with_bias=True, with_act=None, out_dtype=\"float16\"):\n data = wildcard()\n weight = wildcard()\n bias = wildcard()\n gemm = is_op(\"nn.dense\")(data, weight)\n if with_bias:\n add_or_bias_add = is_op(\"add\") | is_op(\"nn.bias_add\")\n gemm_out = add_or_bias_add(gemm, bias)\n else:\n gemm_out = gemm\n\n if with_act is None:\n return gemm_out\n if isinstance(with_act, str) and with_act == \"relu\":\n return is_op(\"nn.relu\")(gemm_out)\n\n assert isinstance(with_act, str) and with_act == \"gelu\"\n return make_gelu_pattern(gemm_out, out_dtype)", "def matrix_mult(m1, m2):\n pass", "def gmm_bayes_activation(TLL):\n\n K_dim = TLL.get_shape()[-1]\n max_TLL = K.max(TLL, axis=-1)\n max_TLL = K.repeat_elements(K.expand_dims(max_TLL, axis=-1), K_dim, axis=-1)\n ETLL = K.exp(TLL - max_TLL)\n SETLL = K.sum(ETLL, -1)\n rep_SETLL = K.repeat_elements(K.expand_dims(SETLL, axis=-1), K_dim, axis=-1)\n\n depended_prob = ETLL / rep_SETLL\n\n return depended_prob", "def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node", "def magma_dgels_gpu(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_dgels_gpu(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def mgc2b(mgc, alpha=0.35, gamma=0.0):\n\n b = mc2b(mgc, alpha)\n if gamma == 0:\n return b\n\n b = gnorm(b, gamma)\n\n b[0] = np.log(b[0])\n b[1:] *= gamma\n\n return b", "def cost_gmm(y, mu, sig, weight):\n n_dim = y.ndim\n shape_y = y.shape\n\n k = weight.shape[-1]\n\n y = y.reshape((-1, shape_y[-1]))\n y = tensor.shape_padright(y)\n\n mu = mu.reshape((-1, shape_y[-1], k))\n sig = sig.reshape((-1, shape_y[-1], k))\n weight = weight.reshape((-1, k))\n\n diff = tensor.sqr(y - mu)\n\n inner = -0.5 * tensor.sum(\n diff / sig**2 +\n 2 * tensor.log(sig) + tensor.log(2 * numpy.pi), axis=-2)\n\n nll = -logsumexp(tensor.log(weight) + inner, axis=-1)\n\n return nll.reshape(shape_y[:-1], ndim=n_dim - 1)", "def local_gemm_to_ger(node):\r\n if node.op == gemm_no_inplace:\r\n z, a, x, y, b = node.inputs\r\n if x.broadcastable[1] and y.broadcastable[0]:\r\n # x and y are both vectors so this might qualifies for a GER\r\n xv = x.dimshuffle(0)\r\n yv = y.dimshuffle(1)\r\n try:\r\n bval = 
T.get_scalar_constant_value(b)\r\n except T.NotScalarConstantError:\r\n # b isn't a constant, GEMM is doing useful pre-scaling\r\n return\r\n\r\n if bval == 1: # best case a natural GER\r\n rval = ger(z, a, xv, yv)\r\n return [rval]\r\n elif bval == 0: # GER on zeros_like should be faster than GEMM\r\n zeros = T.zeros([x.shape[0], y.shape[1]], x.dtype)\r\n rval = ger(zeros, a, xv, yv)\r\n return [rval]\r\n else:\r\n # if bval is another constant, then z is being usefully\r\n # pre-scaled and GER isn't really the right tool for the job.\r\n return", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def convert_bmm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n y = _op.transpose(y, [0, 2, 1])\n out = _op.nn.batch_matmul(x, y)\n g.add_node(op.output(\"Out\")[0], out)" ]
[ "0.797159", "0.6984396", "0.68837875", "0.6616794", "0.6429315", "0.6350331", "0.6247334", "0.60708445", "0.57550097", "0.5749969", "0.5730603", "0.5723331", "0.56517935", "0.56435114", "0.5592844", "0.5431652", "0.5431652", "0.5407086", "0.53182805", "0.5267758", "0.51957977", "0.51679546", "0.51400054", "0.51167125", "0.51152223", "0.5114129", "0.5112681", "0.50999373", "0.5099916", "0.5097759" ]
0.7851844
1
Performs a rank-k update of a symmetric matrix. syrk(self,uplo_,trans_,n_,k_,alpha_,a_,beta_,c_)
def syrk(self,uplo_,trans_,n_,k_,alpha_,a_,beta_,c_):
    _a_minlength = ((n_) * (k_))
    if ((n_) * (k_)) > 0 and a_ is not None and len(a_) != ((n_) * (k_)):
        raise ValueError("Array argument a is not long enough: Is %d, expected %d" % (len(a_),((n_) * (k_))))
    if a_ is None:
        raise ValueError("Argument a cannot be None")
    if a_ is None:
        raise ValueError("Argument a may not be None")
    if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:
        _a_copyarray = False
        _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif a_ is not None:
        _a_copyarray = True
        _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))
        _a_np_tmp[:] = a_
        assert _a_np_tmp.flags.contiguous
        _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _a_copyarray = False
        _a_tmp = None
    _c_minlength = ((n_) * (n_))
    if ((n_) * (n_)) > 0 and c_ is not None and len(c_) != ((n_) * (n_)):
        raise ValueError("Array argument c is not long enough: Is %d, expected %d" % (len(c_),((n_) * (n_))))
    if isinstance(c_,numpy.ndarray) and not c_.flags.writeable:
        raise ValueError("Argument c must be writable")
    if c_ is None:
        raise ValueError("Argument c may not be None")
    if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:
        _c_copyarray = False
        _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif c_ is not None:
        _c_copyarray = True
        _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))
        _c_np_tmp[:] = c_
        assert _c_np_tmp.flags.contiguous
        _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _c_copyarray = False
        _c_tmp = None
    res = __library__.MSK_XX_syrk(self.__nativep,uplo_,trans_,n_,k_,alpha_,_a_tmp,beta_,_c_tmp)
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
    if _c_copyarray:
        c_[:] = _c_np_tmp
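For comparison, a hedged NumPy sketch of the symmetric rank-k update the query describes, C := alpha*A@A.T + beta*C (or alpha*A.T@A + beta*C when the transpose flag is set), writing only the triangle selected by uplo. Row-major flat storage and boolean flags are again assumptions for illustration, not taken from the wrapper itself.

import numpy as np

def syrk_reference(lower, trans, n, k, alpha, a, beta, c):
    # Illustrative only: rank-k update of a symmetric matrix held in a flat
    # buffer of length n*n; only the selected triangle is written back.
    A = np.asarray(a, dtype=float).reshape((k, n) if trans else (n, k))
    update = A.T @ A if trans else A @ A.T            # n x n, symmetric
    C_in = np.asarray(c, dtype=float).reshape(n, n)
    C_out = C_in.copy()
    mask = np.tril(np.ones((n, n), dtype=bool)) if lower else np.triu(np.ones((n, n), dtype=bool))
    C_out[mask] = (alpha * update + beta * C_in)[mask]
    return C_out.ravel()

a = np.arange(6, dtype=float)      # 2x3 matrix, flattened row-major
c = np.zeros(4)                    # 2x2 result buffer
print(syrk_reference(True, False, 2, 3, 1.0, a, 0.0, c))  # [ 5.  0. 14. 50.]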
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def syrk(self,uplo_,trans_,n_,k_,alpha_,a,beta_,c): # 3\n if not isinstance(uplo_,uplo): raise TypeError(\"Argument uplo has wrong type\")\n if not isinstance(trans_,transpose): raise TypeError(\"Argument trans has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (k_)):\n raise ValueError(\"Array argument a has wrong length\")\n if c is None: raise TypeError(\"Invalid type for argument c\")\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((n_) * (n_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.syrk(uplo_,trans_,n_,k_,alpha_,a_,beta_,c_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_c:\n c[:] = _tmparr_c", "def K(self):\n\n # Calculate and return the stiffness matrix in global coordinates\n return matmul(matmul(inv(self.T()), self.k()), self.T())", "def symmetric(k):\r\n k_ = k.copy()\r\n k_.parts = [parts.symmetric.Symmetric(p) for p in k.parts]\r\n return k_", "def sym_K(self):\n raise NotImplementedError", "def symeigLanczos(A, k, extreme=\"both\", *, sparse=False, dim=None):\n Qk, T = Lanczos(A, k, sparse=sparse, dim=dim)\n eigvalsQ, eigvectorsQ = torch.symeig(T, eigenvectors=True)\n eigvectorsQ = torch.matmul(Qk, eigvectorsQ)\n if extreme == \"both\":\n return eigvalsQ[0], eigvectorsQ[:, 0], eigvalsQ[-1], eigvectorsQ[:, -1]\n elif extreme == \"min\":\n return eigvalsQ[0], eigvectorsQ[:, 0]\n elif extreme == \"max\":\n return eigvalsQ[-1], eigvectorsQ[:, -1]", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T", "def ALRA(X, k=None, n_iter=10):\n if k is None:\n k = choose_k(X)\n log.info(f\"No `k` given. 
Automatically determined `k={k}`.\")\n\n # Compute the SVD and compute the rank-k reconstruction\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n X_rank_k = U * s @ Va\n\n X_rank_k = np.ma.masked_array(X_rank_k)\n\n # Find the absolute values of the minimum expression levels for each gene\n minimum_expressions = np.abs(np.min(X_rank_k, axis=0))\n # Zero out all expressions with values below the gene minimum value\n X_rank_k[X_rank_k <= minimum_expressions] = np.ma.masked\n\n # Rescale the expressions so the first two moments match the original matrix\n X_mean, X_std = nonzero_mean(X, axis=0), nonzero_std(X, axis=0, ddof=1)\n X_rk_mean, X_rk_std = X_rank_k.mean(axis=0), X_rank_k.std(axis=0, ddof=1)\n\n scale = X_std / X_rk_std\n translate = -X_rk_mean * scale + X_mean\n\n scale_columns = ~np.isnan(X_std) & ~np.isnan(X_rk_std)\n X_rank_k[:, scale_columns] *= scale[scale_columns]\n X_rank_k[:, scale_columns] += translate[scale_columns]\n\n # Values can become negative during rescaling, so we zero those out\n X_rank_k[X_rank_k < 0] = np.ma.masked\n\n # Restore potentially zeroed out expression values which appeared in the\n # original expression matrix. Where both values are non-zero, prefer the\n # rank-k approximation\n zeroed_out_indices = find_zeroed_indices(X_rank_k, X)\n X_rank_k[zeroed_out_indices] = X[zeroed_out_indices]\n\n log.info(\n f\"{len(zeroed_out_indices[0])} original expression values were \"\n f\"zeroed out during imputation and restored to original values.\"\n )\n\n X_rank_k = X_rank_k.filled(0)\n\n return X_rank_k", "def cluster_k_correction(self):\n\n # Load in the IRAC 4.5 um filter as the observed filter\n irac_45 = SpectralElement.from_file('Data_Repository/filter_curves/Spitzer_IRAC/080924ch2trans_full.txt',\n wave_unit=u.um)\n\n # Store the official IRAC 4.5 um zero point flux for K-correction computations\n irac_45_zp = 179.7 * u.Jy\n\n # If the requested output zero-point is 'vega', pre-load the Vega reference spectrum\n if isinstance(self._output_zero_pt, str) and self._output_zero_pt.lower() == 'vega':\n self._output_zero_pt = SourceSpectrum.from_vega()\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Retrieve the cluster redshift from the SPT catalog\n catalog_idx = cluster_info['SPT_cat_idx']\n cluster_z = self._spt_catalog['REDSHIFT'][catalog_idx]\n\n # Compute the K-correction for the cluster's redshift, the given SED and output parameters\n k_corr = k_correction(z=cluster_z, f_lambda=self._sed,\n g_lambda_R=irac_45_zp, g_lambda_Q=self._output_zero_pt,\n R=irac_45, Q=self._output_filter)\n\n # Store the cluster redshift and K-correction in cluster_info for later use\n cluster_info['redshift'] = cluster_z\n cluster_info['k-correction'] = k_corr", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def matrix_K2(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- w21(l, xt), d23(l, xt)))\n row2 = np.array((- w41(l, xt, zt, rhos, rho), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def eval_K_chol(self, S, sigma_n, sigma_f):\n K = self.eval_K(S)\n K += sigma_n * np.eye(K.shape[0])\n K_chol = jitchol(K)\n return K_chol", "def SSPRKm(m):\n from sympy import factorial, Rational\n\n 
assert m>=2, \"SSPRKm methods must have m>=2\"\n\n alph=snp.zeros([m+1,m+1])\n alph[1,0]=1\n for mm in range(2,m+1):\n for k in range(1,m):\n alph[mm,k]= Rational(alph[mm-1,k-1],k)\n alph[mm,mm-1]=Rational(1,factorial(mm))\n alph[mm,0] = 1-sum(alph[mm,1:])\n\n alpha=np.vstack([snp.zeros(m),snp.eye(m)])\n alpha[m,m-1]=Rational(1/factorial(m))\n beta=alpha.copy()\n alpha[m,1:m-1]=alph[m,1:m-1]\n alpha[m,0] = 1-sum(alpha[m,1:])\n name='SSPRK'+str(m)*2\n return ExplicitRungeKuttaMethod(alpha=alpha,beta=beta,name=name,shortname=name)", "def sympykern(input_dim, k=None, output_dim=1, name=None, param=None):\r\n return kern(input_dim, [spkern(input_dim, k=k, output_dim=output_dim, name=name, param=param)])", "def _set_ks_dynamic(self, ks):\n assert(len(ks) == len(self.idxs))\n self.ks = ks\n if np.max(self.ks) > self._kret:\n self._kret = np.max(self.ks)", "def magma_sorgqr2(m, n, k, A, lda, tau):\n info = c_int_type()\n status = _libmagma.magma_sorgqr2(m, n, k, int(A), lda,\n int(tau), ctypes.byref(info))\n magmaCheckStatus(status)", "def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)", "def magmablas_zsymmetrize(uplo, n, A, lda):\n\n uplo = _uplo_conversion[uplo]\n status = _libmagma.magmablas_zsymmetrize(uplo, n, int(A), lda)\n magmaCheckStatus(status)", "def iterate(rk):\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk", "def simple_kcss2(A, rank):\n # stage 1: initial setup\n _, n = A.shape\n _, _, vh = np.linalg.svd(A)\n V_k = vh[:rank].T # size n x k\n part1 = np.linalg.norm(V_k, axis=1) ** 2 / (2 * rank) # size n\n AVV_T = A @ V_k @ V_k.T\n part2_top = np.linalg.norm(A, axis=0) ** 2 - np.linalg.norm(AVV_T, axis=0) ** 2 # size n\n part2_bottom = 2 * (np.linalg.norm(A, ord='fro') ** 2 - np.linalg.norm(AVV_T, ord='fro') ** 2)\n part2 = part2_top / part2_bottom\n sampling = part1 + part2 # size n\n # stage 2: randomized phase\n c = int(rank * np.log(rank))\n if c > n:\n sel_idx = np.arange(n)\n A_sel = A\n T = V_k.T\n else:\n probabilities = sampling / np.sum(sampling)\n sel_idx = np.random.choice(n, size=c, p=probabilities, replace=False)\n entries = 1 / np.sqrt(sampling[sel_idx])\n SD = np.zeros((n, c))\n SD[sel_idx, np.arange(c)] = entries\n A_sel = A[:, sel_idx] # d x c\n T = V_k.T @ SD # k x c\n # stage 3: deterministic phase\n fix_rank_sel_idx = maxvol(T.T, rank) # c x k => k x k\n cols = A_sel[:, fix_rank_sel_idx]\n final_sel_idx = sel_idx[fix_rank_sel_idx]\n return cols, final_sel_idx", "def set_uniform_Kk(self, clip=True):\n\t\t\n\t\tKk1_los = random_matrix([self.Mm], params=[self.lo_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk1_his = random_matrix([self.Mm], params=[self.hi_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.hi_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk2_los = random_matrix([self.Mm], params=[self.lo_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\tKk2_his = random_matrix([self.Mm], params=[self.hi_Kk2_hyper_lo, 
\n\t\t\t\t\t\t\tself.hi_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\t\n\t\tself.Kk1 = random_matrix([self.Mm, self.Nn], [Kk1_los, Kk1_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk1)\n\t\tself.Kk2 = random_matrix([self.Mm, self.Nn], [Kk2_los, Kk2_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk2)\n\t\t\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def s2(n, k):\n if n == 0 or n != 0 and n == k:\n return 1\n if k == 0 or n < k:\n return 0\n return k * s2(n-1, k) + s2(n-1, k-1)", "def apply_observation_matrix(self, x, k, result=None):\n\n i = 0\n\n # vector case\n ssm_indices = self.active_ssms(k)\n if len(x.shape)==1:\n r = 0\n for j in ssm_indices:\n ssm, scale = self.ssms[j], self.scales[j]\n state_size = ssm.max_dimension\n ri = ssm.apply_observation_matrix(x[i:i+state_size], k-self.ssm_starts[j])\n if scale is not None:\n r += ri * scale[k-self.ssm_starts[j]]\n else:\n r += ri\n i += state_size\n return r\n\n # matrix case\n else:\n assert(len(x.shape)==2)\n\n try:\n rr = self.tmp_arrays[len(result)]\n except KeyError:\n rr = np.empty((len(result),))\n self.tmp_arrays[len(result)] = rr\n\n result[:] = 0\n for j in ssm_indices:\n ssm, scale = self.ssms[j], self.scales[j]\n state_size = ssm.max_dimension\n ssm.apply_observation_matrix(x[i:i+state_size,:], k-self.ssm_starts[j], rr)\n #print \"TSSM step %d applying obs matrix on ssm %d state_size %d n %d scale %f result[0] %f\\n\" % (k, j, state_size, len(result), scale[k-self.ssm_starts[j]] if scale is not None else 1.0, rr[0])\n if scale is not None:\n rr *= scale[k-self.ssm_starts[j]]\n result += rr\n i += state_size", "def wrapped_kronecker(operator_1, operator_2):\n return scipy.sparse.kron(operator_1, operator_2, 'csc')", "def _rk2(self):\n\t\tx_old = self.x\n\t\tk1 = self.dxdt()\n\t\tself.x = x_old +k1*self.dt\n\t\tk2 = self.dxdt()\n\t\tx_new = x_old + (self.dt/2)*(k1+k2)\n\t\tself.x = x_new\n\t\tself.t+=self.dt", "def eval_K(self, S):\n K = (self.eigenfunctions[self.X] * S[None, :]) @ \\\n self.eigenfunctions[self.X].T # shape (n,n)\n return K", "def generate_ks(r_k, theta, kappa=1., psi=0., sym=6):\n W = rotation_matrix(np.deg2rad(theta))\n V = rotation_matrix(np.deg2rad(psi))\n D = scaling_matrix(1 / kappa)\n ks = np.stack([rotate(np.array([r_k, 0]), 2*np.pi/sym*i) for i in range(sym)]\n + [(0, 0)])\n ks = apply_transformation_matrix(ks, V.T @ D @ V @ W)\n return ks", "def sos_modal_forsparse(M, K, C=False, damp_diag=0.03, shift=1):\n\n\t#Kdiag = K.diagonal().reshape(-1,1)\n\n\t#Mdiag = M.diagonal().reshape(-1,1)\n\n\t#minvals = np.sort((Kdiag/Mdiag),axis=0)\n\n\t#shift = minvals[min(7,len(minvals))]\n\n\t#shift = shift[0]\n\n\t#K = ((K.tocsr() + (K.T).tocsr()).tolil()) * 0.5 + shift * ((M.tocsr() + (M.T).tocsr()).tolil()) * 0.5\n\n\t#M = ((M.tocsr() + (M.T).tocsr()).tolil()) * 0.5\n\n\tK = lil_matrix(K)\n\n\tM = lil_matrix(M)\n\n\tK = K + M * shift\n\n\t[lam, Psi] = la.eigh(K.toarray(), M.toarray())\n\n\tlam = lam.reshape(-1,1)\n\n\tomega = np.sqrt(np.abs(lam - shift))\n\n\tomega = omega.reshape(-1,)\n\n\tnorms = np.diag(1.0 / np.sqrt(np.diag(Psi.T@M@Psi)))\n\n\tPsi = Psi @ norms\n\n\tzeta = np.zeros_like(omega)\n\n\tif C is not False:\n\t\tdiagonalized_C = Psi.T@C@Psi\n\n\t\tdiagonal_C = np.diag(diagonalized_C)\n\n\t\tif min(omega) > 1e-5:\n\t\t\tzeta = diagonal_C / 2 / omega # error if 
omega = 0\n\t\t\tmax_off_diagonals = np.amax(np.abs(diagonalized_C\n\t\t\t\t\t\t\t\t\t\t\t - np.diag(diagonal_C)), axis=0)\n\t\t\t# error if no damping\n\t\t\tdamp_error = np.max(max_off_diagonals / diagonal_C)\n\t\telse:\n\t\t\tzeta = np.zeros_like(omega)\n\t\t\tdamp_error = np.zeros_like(omega)\n\t\t\tde_diag_C = diagonalized_C - np.diag(diagonal_C)\n\t\t\tfor mode_num, omega_i in enumerate(omega):\n\t\t\t\tif omega[mode_num] > 1e-5:\n\t\t\t\t\tzeta[mode_num] = diagonal_C[mode_num] / 2 / omega_i\n\t\t\t\t\tdamp_error = (np.max(np.abs(de_diag_C[:, mode_num]))\n\t\t\t\t\t\t\t\t / diagonal_C[mode_num])\n\n\t\tif damp_error > damp_diag:\n\t\t\tprint('Damping matrix cannot be completely diagonalized.')\n\t\t\tprint('Off diagonal error of {:4.0%}.'.format(damp_error))\n\n\treturn omega, zeta, Psi", "def SSPRK3(m):\n from sympy import sqrt, Rational\n one = Rational(1)\n\n n = sqrt(m)\n assert n==int(n), \"SSPRKm3 methods must have m=n^2\"\n assert m>=4, \"SSPRKm3 methods must have m>=4\"\n r = m - n\n alpha=np.vstack([snp.zeros(m),snp.eye(m)])\n alpha[n*(n+1)/2,n*(n+1)/2-1]=(n-one)/(2*n-one)\n beta=alpha/r\n alpha[n*(n+1)/2,(n-1)*(n-2)/2]=n/(2*n-one)\n name='SSPRK'+str(m)+'3'\n return ExplicitRungeKuttaMethod(alpha=alpha,beta=beta,name=name,shortname=name)" ]
[ "0.7740319", "0.5345925", "0.5307275", "0.5296073", "0.51972955", "0.51492083", "0.51247174", "0.5056189", "0.50554264", "0.5031491", "0.5031491", "0.50260574", "0.50148124", "0.49368647", "0.4909689", "0.48912278", "0.4885631", "0.48802838", "0.47982484", "0.4793419", "0.47846904", "0.47846568", "0.4773289", "0.47587737", "0.47548935", "0.47509918", "0.47444072", "0.47420385", "0.47389776", "0.47344804" ]
0.6716742
1
Computes a Cholesky factorization of a sparse matrix. computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc_,aptrc_,asubc_,avalc_)
def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc_,aptrc_,asubc_,avalc_):
    n_ = None
    if n_ is None:
        n_ = len(anzc_)
    elif n_ != len(anzc_):
        raise IndexError("Inconsistent length of array anzc")
    if n_ is None:
        n_ = len(aptrc_)
    elif n_ != len(aptrc_):
        raise IndexError("Inconsistent length of array aptrc")
    if anzc_ is None:
        raise ValueError("Argument anzc cannot be None")
    if anzc_ is None:
        raise ValueError("Argument anzc may not be None")
    if isinstance(anzc_, numpy.ndarray) and anzc_.dtype is numpy.dtype(numpy.int32) and anzc_.flags.contiguous:
        _anzc_copyarray = False
        _anzc_tmp = ctypes.cast(anzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    elif anzc_ is not None:
        _anzc_copyarray = True
        _anzc_np_tmp = numpy.zeros(len(anzc_),numpy.dtype(numpy.int32))
        _anzc_np_tmp[:] = anzc_
        assert _anzc_np_tmp.flags.contiguous
        _anzc_tmp = ctypes.cast(_anzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    else:
        _anzc_copyarray = False
        _anzc_tmp = None
    if aptrc_ is None:
        raise ValueError("Argument aptrc cannot be None")
    if aptrc_ is None:
        raise ValueError("Argument aptrc may not be None")
    if isinstance(aptrc_, numpy.ndarray) and aptrc_.dtype is numpy.dtype(numpy.int64) and aptrc_.flags.contiguous:
        _aptrc_copyarray = False
        _aptrc_tmp = ctypes.cast(aptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    elif aptrc_ is not None:
        _aptrc_copyarray = True
        _aptrc_np_tmp = numpy.zeros(len(aptrc_),numpy.dtype(numpy.int64))
        _aptrc_np_tmp[:] = aptrc_
        assert _aptrc_np_tmp.flags.contiguous
        _aptrc_tmp = ctypes.cast(_aptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    else:
        _aptrc_copyarray = False
        _aptrc_tmp = None
    if asubc_ is None:
        raise ValueError("Argument asubc cannot be None")
    if asubc_ is None:
        raise ValueError("Argument asubc may not be None")
    if isinstance(asubc_, numpy.ndarray) and asubc_.dtype is numpy.dtype(numpy.int32) and asubc_.flags.contiguous:
        _asubc_copyarray = False
        _asubc_tmp = ctypes.cast(asubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    elif asubc_ is not None:
        _asubc_copyarray = True
        _asubc_np_tmp = numpy.zeros(len(asubc_),numpy.dtype(numpy.int32))
        _asubc_np_tmp[:] = asubc_
        assert _asubc_np_tmp.flags.contiguous
        _asubc_tmp = ctypes.cast(_asubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    else:
        _asubc_copyarray = False
        _asubc_tmp = None
    if avalc_ is None:
        raise ValueError("Argument avalc cannot be None")
    if avalc_ is None:
        raise ValueError("Argument avalc may not be None")
    if isinstance(avalc_, numpy.ndarray) and avalc_.dtype is numpy.dtype(numpy.float64) and avalc_.flags.contiguous:
        _avalc_copyarray = False
        _avalc_tmp = ctypes.cast(avalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif avalc_ is not None:
        _avalc_copyarray = True
        _avalc_np_tmp = numpy.zeros(len(avalc_),numpy.dtype(numpy.float64))
        _avalc_np_tmp[:] = avalc_
        assert _avalc_np_tmp.flags.contiguous
        _avalc_tmp = ctypes.cast(_avalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _avalc_copyarray = False
        _avalc_tmp = None
    perm_ptr = ctypes.POINTER(ctypes.c_int32)()
    diag_ptr = ctypes.POINTER(ctypes.c_double)()
    lnzc_ptr = ctypes.POINTER(ctypes.c_int32)()
    lptrc_ptr = ctypes.POINTER(ctypes.c_int64)()
    lensubnval_ = ctypes.c_int64()
    lsubc_ptr = ctypes.POINTER(ctypes.c_int32)()
    lvalc_ptr = ctypes.POINTER(ctypes.c_double)()
    res = __library__.MSK_XX_computesparsecholesky(self.__nativep,multithread_,ordermethod_,tolsingular_,n_,_anzc_tmp,_aptrc_tmp,_asubc_tmp,_avalc_tmp,ctypes.byref(perm_ptr),ctypes.byref(diag_ptr),ctypes.byref(lnzc_ptr),ctypes.byref(lptrc_ptr),ctypes.byref(lensubnval_),ctypes.byref(lsubc_ptr),ctypes.byref(lvalc_ptr))
    if res != 0:
        raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
    perm_arr = perm_ptr[0:n_]
    __library__.MSK_XX_freeenv(self.__nativep,perm_ptr)
    diag_arr = diag_ptr[0:n_]
    __library__.MSK_XX_freeenv(self.__nativep,diag_ptr)
    lnzc_arr = lnzc_ptr[0:n_]
    __library__.MSK_XX_freeenv(self.__nativep,lnzc_ptr)
    lptrc_arr = lptrc_ptr[0:n_]
    __library__.MSK_XX_freeenv(self.__nativep,lptrc_ptr)
    lensubnval_ = lensubnval_.value
    _lensubnval_return_value = lensubnval_
    lsubc_arr = lsubc_ptr[0:lensubnval_]
    __library__.MSK_XX_freeenv(self.__nativep,lsubc_ptr)
    lvalc_arr = lvalc_ptr[0:lensubnval_]
    __library__.MSK_XX_freeenv(self.__nativep,lvalc_ptr)
    return (perm_arr,diag_arr,lnzc_arr,lptrc_arr,_lensubnval_return_value,lsubc_arr,lvalc_arr)
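The wrapper above hands back a permutation array (perm), a diagonal array (diag) and a factor stored in split column-compressed arrays (lnzc, lptrc, lsubc, lvalc); their exact meaning is defined by the native routine, not shown here. As a reminder of the underlying identity only, a small hedged sketch on a dense copy follows; a genuine sparse factorization would also pick a fill-reducing permutation P so that P A P' = L L', which is presumably what the perm output corresponds to. The example matrix values are made up.

import numpy as np
from scipy.sparse import csc_matrix

# A small symmetric positive definite matrix in column-compressed form,
# analogous to the anzc/aptrc/asubc/avalc inputs above.
A = csc_matrix(np.array([[4.0, 1.0, 0.0],
                         [1.0, 3.0, 0.0],
                         [0.0, 0.0, 2.0]]))

# Dense Cholesky on a densified copy, purely to illustrate A = L @ L.T;
# a real sparse factorization would exploit the sparsity pattern directly.
L = np.linalg.cholesky(A.toarray())
print(np.allclose(L @ L.T, A.toarray()))   # True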
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc,aptrc,asubc,avalc): # 3\n n_ = None\n if n_ is None:\n n_ = len(anzc)\n elif n_ != len(anzc):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc)\n elif n_ != len(aptrc):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if n_ is None: n_ = 0\n if anzc is None: raise TypeError(\"Invalid type for argument anzc\")\n if anzc is None:\n anzc_ = None\n else:\n try:\n anzc_ = memoryview(anzc)\n except TypeError:\n try:\n _tmparr_anzc = array.array(\"i\",anzc)\n except TypeError:\n raise TypeError(\"Argument anzc has wrong type\")\n else:\n anzc_ = memoryview(_tmparr_anzc)\n \n else:\n if anzc_.format != \"i\":\n anzc_ = memoryview(array.array(\"i\",anzc))\n \n if aptrc is None: raise TypeError(\"Invalid type for argument aptrc\")\n if aptrc is None:\n aptrc_ = None\n else:\n try:\n aptrc_ = memoryview(aptrc)\n except TypeError:\n try:\n _tmparr_aptrc = array.array(\"q\",aptrc)\n except TypeError:\n raise TypeError(\"Argument aptrc has wrong type\")\n else:\n aptrc_ = memoryview(_tmparr_aptrc)\n \n else:\n if aptrc_.format != \"q\":\n aptrc_ = memoryview(array.array(\"q\",aptrc))\n \n if asubc is None: raise TypeError(\"Invalid type for argument asubc\")\n if asubc is None:\n asubc_ = None\n else:\n try:\n asubc_ = memoryview(asubc)\n except TypeError:\n try:\n _tmparr_asubc = array.array(\"i\",asubc)\n except TypeError:\n raise TypeError(\"Argument asubc has wrong type\")\n else:\n asubc_ = memoryview(_tmparr_asubc)\n \n else:\n if asubc_.format != \"i\":\n asubc_ = memoryview(array.array(\"i\",asubc))\n \n if avalc is None: raise TypeError(\"Invalid type for argument avalc\")\n if avalc is None:\n avalc_ = None\n else:\n try:\n avalc_ = memoryview(avalc)\n except TypeError:\n try:\n _tmparr_avalc = array.array(\"d\",avalc)\n except TypeError:\n raise TypeError(\"Argument avalc has wrong type\")\n else:\n avalc_ = memoryview(_tmparr_avalc)\n \n else:\n if avalc_.format != \"d\":\n avalc_ = memoryview(array.array(\"d\",avalc))\n \n res,resargs = self.__obj.computesparsecholesky(multithread_,ordermethod_,tolsingular_,n_,anzc_,aptrc_,asubc_,avalc_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc = resargs\n return _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc", "def read_qmcpack_sparse(filename, get_chol=True):\n with h5py.File(filename, 'r') as fh5:\n real_ints = False\n enuc = fh5['Hamiltonian/Energies'][:][0]\n dims = fh5['Hamiltonian/dims'][:]\n chunks = dims[2]\n nmo = dims[3]\n nalpha = dims[4]\n nbeta = dims[5]\n nchol = dims[7]\n try:\n hcore = fh5['Hamiltonian/hcore'][:]\n hcore = hcore.view(numpy.complex128).reshape(nmo,nmo)\n except KeyError:\n # Old sparse format.\n hcore = fh5['Hamiltonian/H1'][:].view(numpy.complex128).ravel()\n idx = fh5['Hamiltonian/H1_indx'][:]\n row_ix = idx[::2]\n col_ix = idx[1::2]\n hcore = scipy.sparse.csr_matrix((hcore, (row_ix, col_ix))).toarray()\n hcore = numpy.tril(hcore, -1) + numpy.tril(hcore, 0).conj().T\n except ValueError:\n # Real format.\n hcore = fh5['Hamiltonian/hcore'][:]\n real_ints = True\n if get_chol:\n chol_vecs = read_cholesky(filename, real_ints=real_ints)\n else:\n chol_vecs = None\n return (hcore, chol_vecs, enuc, int(nmo), (int(nalpha), int(nbeta)))", "def cholesky(matrix_x):\n n = len(matrix_x)\n\n # initialize _L matrix (lower triangular matrix)\n _L = [[0.0] * n for i in xrange(n)]\n\n # Perform the Cholesky 
decomposition\n for i in xrange(n):\n for k in xrange(i+1):\n tmp_sum = sum(_L[i][j] * _L[k][j] for j in xrange(k))\n \n if (i == k): # Diagonal elements\n _L[i][k] = sqrt(matrix_x[i][i] - tmp_sum)\n else:\n _L[i][k] = (1.0 / _L[k][k] * (matrix_x[i][k] - tmp_sum))\n return _L", "def make_sparse(self, fmt='csc', make_method=None):\n if make_method:\n self.sparse = make_method(self.hamiltonian)\n else:\n self.sparse = self.hamiltonian.to_matrix(sparse=fmt)", "def calculate3(pred_ccm, pred_ad, truth_ccm, truth_ad, method=\"sym_pseudoV\", weights=None, verbose=False, pseudo_counts=True, full_matrix=True, in_mat=2):\n larger_is_worse_methods = ['sym_pseudoV_nc', 'sym_pseudoV', 'pseudoV_nc', 'pseudoV', \"simpleKL_nc\", 'simpleKL'] # methods where a larger score is worse\n\n \n pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad = pred_ccm, pred_ad, truth_ccm, truth_ad\n y = np.array(pc_pred_ad.shape)[1]\n nssms = int(np.ceil(0.5 * (2*y + 1) - 0.5 * np.sqrt(4*y + 1)))\n\n if isinstance(method, list):\n res = [calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, in_mat=in_mat) for m in method] # calculate the score for each method\n\n # normalize the scores to be between (worst of NCluster score and OneCluster score) and (Truth score)\n ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', nssms=nssms), mb.get_ad('NClusterOneLineage', nssms=nssms))\n ncluster_score = [calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]\n del ncluster_ccm, ncluster_ad\n onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', nssms=nssms), mb.get_ad('OneCluster', nssms=nssms))\n onecluster_score = [calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]\n del onecluster_ccm, onecluster_ad\n for i in range(len(method)):\n if method[i] in larger_is_worse_methods: # normalization for methods where a larger score is worse\n worst_score = max(ncluster_score[i], onecluster_score[i]) # worst of NCluster and OneCluster scores\n res[i] = 1 - (res[i] / worst_score) # normalize the score\n else: # normalization for methods where a smaller score is worse\n worst_score = min(ncluster_score[i], onecluster_score[i])\n res[i] = (res[i] - worst_score) / (1 - worst_score)\n\n\n if weights is None: # if weights are not specified or if they cannot be normalized then default to equal weights\n weights = [1] * len(method)\n elif sum(weights) == 0:\n Warning('Weights sum to zero so they are invalid, defaulting to equal weights')\n weights = [1] * len(method)\n\n weights = np.array(weights) / float(sum(weights)) # normalize the weights\n score = sum(np.multiply(res, weights))\n else:\n \n score = calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)\n del pc_pred_ccm\n del pc_pred_ad\n # normalize the score to be between (worst of NCluster score and OneCluster score) and (Truth score) - similar to above\n ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', nssms=nssms), mb.get_ad('NClusterOneLineage', nssms=nssms))\n ncluster_score = calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)\n del ncluster_ccm, ncluster_ad\n 
onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', nssms=nssms), mb.get_ad('OneCluster', nssms=nssms))\n \n onecluster_score = calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)\n del onecluster_ccm, onecluster_ad\n\n #print score, ncluster_score, onecluster_score\n if method in larger_is_worse_methods:\n worst_score = max(ncluster_score, onecluster_score)\n score = 1 - (score / worst_score)\n else:\n worst_score = min(ncluster_score, onecluster_score)\n score = (score - worst_score) / (1 - worst_score)\n return score", "def cholesky(input):\n is_input_dparray = isinstance(input, dparray)\n\n if not use_origin_backend(input) and is_input_dparray and input.ndim == 2 and \\\n input.shape[0] == input.shape[1] and input.shape[0] > 0:\n result = dpnp_cholesky(input)\n\n return result\n\n return call_origin(numpy.linalg.cholesky, input)", "def cholesky(A):\n n = len(A)\n\n # Create zero matrix for L\n L=np.zeros((n,n))\n\n # Perform the Cholesky decomposition\n for i in range(n):\n for k in range(i+1):\n tmp_sum = sum(L[i][j] * L[k][j] for j in xrange(k))\n \n if (i == k): # Diagonal elements\n\n L[i][k] = math.sqrt(A[i][i] - tmp_sum)\n else:\n\n L[i][k] = (1.0 / L[k][k] * (A[i][k] - tmp_sum))\n return L", "def analyze_sensitivity_sparse_grid(sparse_grid,max_order=2):\n from pyapprox.multivariate_polynomials import \\\n define_poly_options_from_variable_transformation\n from pyapprox.adaptive_sparse_grid import \\\n convert_sparse_grid_to_polynomial_chaos_expansion\n pce_opts=define_poly_options_from_variable_transformation(\n sparse_grid.variable_transformation)\n pce = convert_sparse_grid_to_polynomial_chaos_expansion(\n sparse_grid,pce_opts)\n pce_main_effects,pce_total_effects=\\\n get_main_and_total_effect_indices_from_pce(\n pce.get_coefficients(),pce.get_indices())\n\n interaction_terms, pce_sobol_indices = get_sobol_indices(\n pce.get_coefficients(),pce.get_indices(),max_order=max_order)\n \n return SensivitityResult(\n {'main_effects':pce_main_effects,\n 'total_effects':pce_total_effects,\n 'sobol_indices':pce_sobol_indices,\n 'sobol_interaction_indices':interaction_terms,\n 'pce':pce})", "def test_csm_unsorted(self):\r\n sp_types = {'csc': sp.csc_matrix,\r\n 'csr': sp.csr_matrix}\r\n\r\n for format in ['csr', 'csc', ]:\r\n for dtype in ['float32', 'float64']:\r\n x = tensor.tensor(dtype=dtype, broadcastable=(False,))\r\n y = tensor.ivector()\r\n z = tensor.ivector()\r\n s = tensor.ivector()\r\n # Sparse advanced indexing produces unsorted sparse matrices\r\n a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,\r\n unsorted_indices=True)[1][0]\r\n # Make sure it's unsorted\r\n assert not a.has_sorted_indices\r\n def my_op(x):\r\n y = tensor.constant(a.indices)\r\n z = tensor.constant(a.indptr)\r\n s = tensor.constant(a.shape)\r\n return tensor.sum(\r\n dense_from_sparse(CSM(format)(x, y, z, s) * a))\r\n verify_grad_sparse(my_op, [a.data])", "def calc_cmatrix(self):\n tw = self.twiss_df\n res = self._results_df\n\n LOG.debug(\"Calculating CMatrix.\")\n with timeit(lambda t:\n LOG.debug(\" CMatrix calculated in {:f}s\".format(t))):\n\n j = np.array([[0., 1.],\n [-1., 0.]])\n rs = np.reshape(tw.as_matrix(columns=[\"R11\", \"R12\",\n \"R21\", \"R22\"]),\n (len(tw), 2, 2))\n cs = np.einsum(\"ij,kjn,no->kio\",\n -j, np.transpose(rs, axes=(0, 2, 1)), j)\n cs = np.einsum(\"k,kij->kij\", (1 / np.sqrt(1 + np.linalg.det(rs))), cs)\n\n g11a = 1 / np.sqrt(tw.loc[:, 
\"BETX\"])\n g12a = np.zeros(len(tw))\n g21a = tw.loc[:, \"ALFX\"] / np.sqrt(tw.loc[:, \"BETX\"])\n g22a = np.sqrt(tw.loc[:, \"BETX\"])\n gas = np.reshape(np.array([g11a, g12a,\n g21a, g22a]).T,\n (len(tw), 2, 2))\n\n ig11b = np.sqrt(tw.loc[:, \"BETY\"])\n ig12b = np.zeros(len(tw))\n ig21b = -tw.loc[:, \"ALFY\"] / np.sqrt(tw.loc[:, \"BETY\"])\n ig22b = 1. / np.sqrt(tw.loc[:, \"BETY\"])\n igbs = np.reshape(np.array([ig11b, ig12b,\n ig21b, ig22b]).T,\n (len(tw), 2, 2))\n cs = np.einsum(\"kij,kjl,kln->kin\", gas, cs, igbs)\n gammas = np.sqrt(1 - np.linalg.det(cs))\n\n res.loc[:, \"GAMMA_C\"] = gammas\n\n res.loc[:, \"F1001_C\"] = ((cs[:, 0, 0] + cs[:, 1, 1]) * 1j +\n (cs[:, 0, 1] - cs[:, 1, 0])) / 4 / gammas\n res.loc[:, \"F1010_C\"] = ((cs[:, 0, 0] - cs[:, 1, 1]) * 1j +\n (-cs[:, 0, 1]) - cs[:, 1, 0]) / 4 / gammas\n\n res.loc[:, \"C11\"] = cs[:, 0, 0]\n res.loc[:, \"C12\"] = cs[:, 0, 1]\n res.loc[:, \"C21\"] = cs[:, 1, 0]\n res.loc[:, \"C22\"] = cs[:, 1, 1]\n\n LOG.debug(\" Average coupling amplitude |F1001|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1001_C\"]))))\n LOG.debug(\" Average coupling amplitude |F1010|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1010_C\"]))))\n LOG.debug(\" Average gamma: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"GAMMA_C\"]))))\n\n self._log_added('GAMMA_C', 'F1001_C', 'F1010_C', 'C11', 'C12', 'C21', 'C22')", "def cp_sparse(tensor, rank, penalties, nonneg=False, init=None, warmstart=True,\n tol=1e-6, min_time=0, max_time=np.inf, n_iter_max=1000, print_every=0.3,\n prepend_print='\\r', append_print=''):\n\n # default initialization method\n if init is None:\n init = 'randn' if nonneg is False else 'rand'\n\n # initialize factors\n if warmstart:\n factors, _ = cp_als(tensor, rank, nonneg=nonneg, tol=tol)\n else:\n factors = _cp_initialize(tensor, rank, init)\n\n def _compute_penalty(_factors):\n return np.sum([lam*np.sum(np.abs(f)) for lam, f in zip(penalties, _factors)])\n\n # setup optimization\n converged = False\n norm_tensor = tensorly.tenalg.norm(tensor, 2)\n t_elapsed = [0]\n obj_history = [_compute_squared_recon_error(tensor, factors, norm_tensor) + _compute_penalty(factors)]\n\n # initial print statement\n verbose = print_every > 0\n print_counter = 0 # time to print next progress\n if verbose:\n print(prepend_print+'iter=0, error={0:.4f}'.format(obj_history[-1]), end=append_print)\n\n # gradient descent params\n linesearch_iters = 100\n\n # main loop\n t0 = time()\n for iteration in range(n_iter_max):\n\n # alternating optimization over modes\n for mode in range(tensor.ndim):\n # current optimization state\n stepsize = 1.0\n old_obj = obj_history[-1]\n fctr = factors[mode].copy()\n\n # keep track of positive and negative elements\n if not nonneg:\n pos = fctr > 0\n neg = fctr < 0\n\n # form unfolding and khatri-rao product\n unf = unfold(tensor, mode)\n kr = khatri_rao(factors, skip_matrix=mode)\n\n # calculate gradient\n kr_t_kr = np.dot(kr.T, kr)\n gradient = np.dot(fctr, kr_t_kr) - np.dot(unf, kr)\n\n # proximal gradient update\n new_obj = np.inf\n\n for liter in range(linesearch_iters):\n # take gradient step\n new_fctr = fctr - stepsize*gradient\n\n # iterative soft-thresholding\n if nonneg:\n new_fctr -= stepsize*penalties[mode]\n new_fctr[new_fctr<0] = 0.0\n else:\n new_fctr[pos] -= stepsize*penalties[mode]\n new_fctr[neg] += stepsize*penalties[mode]\n sign_changes = (new_factor > 0 & neg) | (new_factor < 0 & pos)\n new_fctr[sign_changes] = 0.0\n\n # calculate new error\n factors[mode] = new_fctr\n new_obj = 
_compute_squared_recon_error(tensor, factors, norm_tensor) + _compute_penalty(factors)\n\n # break if error went down\n if new_obj < old_obj:\n factors[mode] = new_fctr\n break\n # decrease step size if error went up\n else:\n stepsize /= 2.0\n # give up if too many iterations\n if liter == (linesearch_iters - 1):\n factors[mode] = fctr\n new_obj = old_obj\n\n # renormalize factors\n factors = standardize_factors(factors, sort_factors=False)\n\n # check convergence\n t_elapsed.append(time() - t0)\n obj_history.append(new_obj)\n\n # break loop if converged\n converged = abs(obj_history[-2] - obj_history[-1]) < tol\n if converged and (time()-t0)>min_time:\n if verbose: print(prepend_print+'converged in {} iterations.'.format(iteration+1), end=append_print)\n break\n\n # display progress\n if verbose and (time()-t0)/print_every > print_counter:\n print_str = 'iter={0:d}, error={1:.4f}, variation={2:.4f}'.format(\n iteration+1, obj_history[-1], obj_history[-2] - obj_history[-1])\n print(prepend_print+print_str, end=append_print)\n print_counter += print_every\n\n # stop early if over time\n if (time()-t0)>max_time:\n break\n\n if not converged and verbose:\n print('gave up after {} iterations and {} seconds'.format(iteration, time()-t0), end=append_print)\n\n # return optimized factors and info\n return factors, { 'err_hist' : obj_history,\n 't_hist' : t_elapsed,\n 'err_final' : obj_history[-1],\n 'converged' : converged,\n 'iterations' : len(obj_history) }", "def __call__(self, sparse_matrix: PipelinedRDD):\n rows = sparse_matrix.collect()\n\n mat_index, mat_weights = zip(*rows)\n mat_row, mat_col = zip(*mat_index)\n tokens_num = len(self.tokens_list)\n\n self._log.info(\"Building matrix...\")\n matrix = sparse.coo_matrix((mat_weights, (mat_row, mat_col)),\n shape=(tokens_num, tokens_num))\n Cooccurrences() \\\n .construct(self.tokens_list, matrix) \\\n .save(output=self.output, series=\"id2vec\", deps=(self.df_model,))", "def eval_sparse(self, array_in, array_out, sp_matrix=None):\n if sp_matrix is None:\n sp_matrix = self.to_sparse_matrix(array_in.shape, \"csc\")\n # print(\"usually:\", sp_matrix.todense())\n array_out[:] = sp_matrix.dot(array_in.reshape(-1)).reshape(array_out.shape)", "def cholesky_numpy(M, cholesky):\r\n n = M.shape[0]\r\n\r\n # Perform the Cholesky decomposition\r\n for i in range(n):\r\n for j in range(i+1):\r\n val = M[i, j] - np.dot(cholesky[i, :j], cholesky[j, :j] )\r\n\r\n if (i == j): # Calculate diagonal elements\r\n cholesky[i, j] = sqrt(val)\r\n else: # Calculate below-diagonal elements\r\n cholesky[i, j] = (val / cholesky[j, j])\r\n return", "def timesCroot(self, mat):\r\n print(\"WARNING: timesCroot is not yet tested\")\r\n if self.opts['CMA_diagonal'] is True \\\r\n or self.countiter <= self.opts['CMA_diagonal']:\r\n res = (self._Croot * mat.T).T\r\n else:\r\n res = np.dot(self._Croot, mat)\r\n return res", "def precondition_sparse_matrix(A: lil_matrix) -> linalg.LinearOperator:\n ilu = linalg.spilu(A)\n Mx = ilu.solve\n return linalg.LinearOperator(A.shape, Mx)", "def _solve_cg(lap_sparse, B, tol, return_full_prob=False):\n lap_sparse = lap_sparse.tocsc()\n X = []\n for i in range(len(B)):\n x0 = cg(lap_sparse, -B[i].toarray(), tol=tol)[0]\n X.append(x0)\n if not return_full_prob:\n X = np.array(X)\n X = np.argmax(X, axis=0)\n return X", "def test_csm_sparser(self):\r\n sp_types = {'csc': sp.csc_matrix,\r\n 'csr': sp.csr_matrix}\r\n\r\n for format in ['csc', 'csr']:\r\n for dtype in ['float32', 'float64']:\r\n x = tensor.tensor(dtype=dtype, 
broadcastable=(False,))\r\n y = tensor.ivector()\r\n z = tensor.ivector()\r\n s = tensor.ivector()\r\n\r\n a = as_sparse_variable(sp_types[format](random_lil((4, 3),\r\n dtype, 1)))\r\n\r\n f = theano.function([x, y, z, s],\r\n tensor.grad(dense_from_sparse(\r\n a * CSM(format)(x, y, z, s)).sum(), x))\r\n\r\n spmat = sp_types[format](random_lil((4, 3), dtype, 3))\r\n\r\n res = f(spmat.data, spmat.indices, spmat.indptr,\r\n numpy.asarray(spmat.shape, 'int32'))\r\n\r\n assert len(spmat.data) == len(res)", "def sparse_options(default_solver='spsolve',\n default_least_squares_solver='least_squares_lsmr' if HAVE_SCIPY_LSMR else 'least_squares_generic_lsmr',\n bicgstab_tol=1e-15,\n bicgstab_maxiter=None,\n spilu_drop_tol=1e-4,\n spilu_fill_factor=10,\n spilu_drop_rule='basic,area',\n spilu_permc_spec='COLAMD',\n spsolve_permc_spec='COLAMD',\n spsolve_keep_factorization=True,\n lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False,\n pyamg_tol=1e-5,\n pyamg_maxiter=400,\n pyamg_verb=False,\n pyamg_rs_strength=('classical', {'theta': 0.25}),\n pyamg_rs_CF='RS',\n pyamg_rs_presmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_max_levels=10,\n pyamg_rs_max_coarse=500,\n pyamg_rs_coarse_solver='pinv2',\n pyamg_rs_cycle='V',\n pyamg_rs_accel=None,\n pyamg_rs_tol=1e-5,\n pyamg_rs_maxiter=100,\n pyamg_sa_symmetry='hermitian',\n pyamg_sa_strength='symmetric',\n pyamg_sa_aggregate='standard',\n pyamg_sa_smooth=('jacobi', {'omega': 4.0/3.0}),\n pyamg_sa_presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_improve_candidates=[('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 4}), None],\n pyamg_sa_max_levels=10,\n pyamg_sa_max_coarse=500,\n pyamg_sa_diagonal_dominance=False,\n pyamg_sa_coarse_solver='pinv2',\n pyamg_sa_cycle='V',\n pyamg_sa_accel=None,\n pyamg_sa_tol=1e-5,\n pyamg_sa_maxiter=100):\n\n assert default_least_squares_solver.startswith('least_squares')\n\n opts = (('bicgstab_spilu', {'type': 'bicgstab_spilu',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter,\n 'spilu_drop_tol': spilu_drop_tol,\n 'spilu_fill_factor': spilu_fill_factor,\n 'spilu_drop_rule': spilu_drop_rule,\n 'spilu_permc_spec': spilu_permc_spec}),\n ('bicgstab', {'type': 'bicgstab',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter}),\n ('spsolve', {'type': 'spsolve',\n 'permc_spec': spsolve_permc_spec,\n 'keep_factorization': spsolve_keep_factorization}),\n ('lgmres', {'type': 'lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k}),\n ('least_squares_lsqr', {'type': 'least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}))\n\n if HAVE_SCIPY_LSMR:\n opts += (('least_squares_lsmr', {'type': 'least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': least_squares_lsmr_atol,\n 'btol': 
least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show}),)\n\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': pyamg_tol,\n 'maxiter': pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': pyamg_rs_strength,\n 'CF': pyamg_rs_CF,\n 'presmoother': pyamg_rs_presmoother,\n 'postsmoother': pyamg_rs_postsmoother,\n 'max_levels': pyamg_rs_max_levels,\n 'max_coarse': pyamg_rs_max_coarse,\n 'coarse_solver': pyamg_rs_coarse_solver,\n 'cycle': pyamg_rs_cycle,\n 'accel': pyamg_rs_accel,\n 'tol': pyamg_rs_tol,\n 'maxiter': pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': pyamg_sa_symmetry,\n 'strength': pyamg_sa_strength,\n 'aggregate': pyamg_sa_aggregate,\n 'smooth': pyamg_sa_smooth,\n 'presmoother': pyamg_sa_presmoother,\n 'postsmoother': pyamg_sa_postsmoother,\n 'improve_candidates': pyamg_sa_improve_candidates,\n 'max_levels': pyamg_sa_max_levels,\n 'max_coarse': pyamg_sa_max_coarse,\n 'diagonal_dominance': pyamg_sa_diagonal_dominance,\n 'coarse_solver': pyamg_sa_coarse_solver,\n 'cycle': pyamg_sa_cycle,\n 'accel': pyamg_sa_accel,\n 'tol': pyamg_sa_tol,\n 'maxiter': pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n opts.update(genericsolvers.options())\n def_opt = opts.pop(default_solver)\n if default_least_squares_solver != default_solver:\n def_ls_opt = opts.pop(default_least_squares_solver)\n ordered_opts = OrderedDict(((default_solver, def_opt),\n (default_least_squares_solver, def_ls_opt)))\n else:\n ordered_opts = OrderedDict(((default_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts", "def test_cholesky_banded_lower_scipy_test(self):\r\n # Symmetric positive definite banded matrix `a`\r\n a = np.array([[4.0, 1.0, 0.0, 0.0],\r\n [1.0, 4.0, 0.5, 0.0],\r\n [0.0, 0.5, 4.0, 0.2],\r\n [0.0, 0.0, 0.2, 4.0]])\r\n # Banded storage form of `a`.\r\n ab = np.array([[4.0, 4.0, 4.0, 4.0],\r\n [1.0, 0.5, 0.2, -1.0]])\r\n c = bla._cholesky_banded(ab, lower=True)\r\n lfac = np.zeros_like(a)\r\n lfac[range(4), range(4)] = c[0]\r\n lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]\r\n assert_allclose(a, np.dot(lfac, lfac.T))", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def to_sparse(self, method='csr_matrix'):\r\n data = self.data.values\r\n if method == 'csr_matrix':\r\n data_sp = sps.csr_matrix(data)\r\n elif method == 'bsr_matrix':\r\n data_sp = sps.bsr_matrix(data)\r\n elif method == 'coo_matrix':\r\n data_sp = sps.coo_matrix(data)\r\n elif method == 'csc_matrix':\r\n data_sp = sps.csc_matrix(data)\r\n elif method == 'dia_matrix':\r\n data_sp = sps.dia_matrix(data)\r\n elif method == 'dok_matrix':\r\n data_sp = sps.dok_matrix(data)\r\n elif method == 'lil_matrix':\r\n data_sp = sps.lil_matrix(data)\r\n else:\r\n raise ValueError('The method does not exist in scipy.sparse')\r\n return data_sp", "def test_to_sparse(self, fn_name, fn_args, proto_list_key):\n self.run_benchmarks(fn_name, _get_prensor_to_sparse_tensor_fn, fn_args,\n proto_list_key)", "def run_PCA_long(self, sparse_matrix):\n\n pca_explained = np.cumsum(PCA().fit(sparse_matrix).explained_variance_ratio_)\n pca_explainedby = np.where(pca_explained>=0.9)[0][0]\n pca = PCA(n_components=pca_explainedby)\n pca.fit(sparse_matrix)\n \n today = datetime.date.today()\n filename = 'sparse_long_pca_model.pkl'\n joblib.dump(pca, filename)\n \n return pca.transform(sparse_matrix), pca", "def 
_create_mkl_sparse(matrix):\n\n double_precision = _is_double(matrix)\n\n # Figure out which matrix creation function to use\n if _spsparse.isspmatrix_csr(matrix):\n _check_scipy_index_typing(matrix)\n assert matrix.data.shape[0] == matrix.indices.shape[0]\n assert matrix.indptr.shape[0] == matrix.shape[0] + 1\n handle_func = MKL._mkl_sparse_d_create_csr if double_precision else MKL._mkl_sparse_s_create_csr\n\n elif _spsparse.isspmatrix_csc(matrix):\n _check_scipy_index_typing(matrix)\n assert matrix.data.shape[0] == matrix.indices.shape[0]\n assert matrix.indptr.shape[0] == matrix.shape[1] + 1\n handle_func = MKL._mkl_sparse_d_create_csc if double_precision else MKL._mkl_sparse_s_create_csc\n\n elif _spsparse.isspmatrix_bsr(matrix):\n _check_scipy_index_typing(matrix)\n return _create_mkl_sparse_bsr(matrix), double_precision\n\n else:\n raise ValueError(\"Matrix is not CSC, CSR, or BSR\")\n\n return _pass_mkl_handle_csr_csc(matrix, handle_func), double_precision", "def sparsetriangularsolvedense(self,transposed_,lnzc_,lptrc_,lsubc_,lvalc_,b_):\n n_ = None\n if n_ is None:\n n_ = len(b_)\n elif n_ != len(b_):\n raise IndexError(\"Inconsistent length of array b\")\n if n_ is None:\n n_ = len(lnzc_)\n elif n_ != len(lnzc_):\n raise IndexError(\"Inconsistent length of array lnzc\")\n if n_ is None:\n n_ = len(lptrc_)\n elif n_ != len(lptrc_):\n raise IndexError(\"Inconsistent length of array lptrc\")\n _lnzc_minlength = (n_)\n if (n_) > 0 and lnzc_ is not None and len(lnzc_) != (n_):\n raise ValueError(\"Array argument lnzc is not long enough: Is %d, expected %d\" % (len(lnzc_),(n_)))\n if lnzc_ is None:\n raise ValueError(\"Argument lnzc cannot be None\")\n if lnzc_ is None:\n raise ValueError(\"Argument lnzc may not be None\")\n if isinstance(lnzc_, numpy.ndarray) and lnzc_.dtype is numpy.dtype(numpy.int32) and lnzc_.flags.contiguous:\n _lnzc_copyarray = False\n _lnzc_tmp = ctypes.cast(lnzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif lnzc_ is not None:\n _lnzc_copyarray = True\n _lnzc_np_tmp = numpy.zeros(len(lnzc_),numpy.dtype(numpy.int32))\n _lnzc_np_tmp[:] = lnzc_\n assert _lnzc_np_tmp.flags.contiguous\n _lnzc_tmp = ctypes.cast(_lnzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _lnzc_copyarray = False\n _lnzc_tmp = None\n \n _lptrc_minlength = (n_)\n if (n_) > 0 and lptrc_ is not None and len(lptrc_) != (n_):\n raise ValueError(\"Array argument lptrc is not long enough: Is %d, expected %d\" % (len(lptrc_),(n_)))\n if lptrc_ is None:\n raise ValueError(\"Argument lptrc cannot be None\")\n if lptrc_ is None:\n raise ValueError(\"Argument lptrc may not be None\")\n if isinstance(lptrc_, numpy.ndarray) and lptrc_.dtype is numpy.dtype(numpy.int64) and lptrc_.flags.contiguous:\n _lptrc_copyarray = False\n _lptrc_tmp = ctypes.cast(lptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif lptrc_ is not None:\n _lptrc_copyarray = True\n _lptrc_np_tmp = numpy.zeros(len(lptrc_),numpy.dtype(numpy.int64))\n _lptrc_np_tmp[:] = lptrc_\n assert _lptrc_np_tmp.flags.contiguous\n _lptrc_tmp = ctypes.cast(_lptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _lptrc_copyarray = False\n _lptrc_tmp = None\n \n lensubnval_ = None\n if lensubnval_ is None:\n lensubnval_ = len(lsubc_)\n elif lensubnval_ != len(lsubc_):\n raise IndexError(\"Inconsistent length of array lsubc\")\n if lensubnval_ is None:\n lensubnval_ = len(lvalc_)\n elif lensubnval_ != len(lvalc_):\n raise IndexError(\"Inconsistent length of array lvalc\")\n 
_lsubc_minlength = (lensubnval_)\n if (lensubnval_) > 0 and lsubc_ is not None and len(lsubc_) != (lensubnval_):\n raise ValueError(\"Array argument lsubc is not long enough: Is %d, expected %d\" % (len(lsubc_),(lensubnval_)))\n if lsubc_ is None:\n raise ValueError(\"Argument lsubc cannot be None\")\n if lsubc_ is None:\n raise ValueError(\"Argument lsubc may not be None\")\n if isinstance(lsubc_, numpy.ndarray) and lsubc_.dtype is numpy.dtype(numpy.int32) and lsubc_.flags.contiguous:\n _lsubc_copyarray = False\n _lsubc_tmp = ctypes.cast(lsubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif lsubc_ is not None:\n _lsubc_copyarray = True\n _lsubc_np_tmp = numpy.zeros(len(lsubc_),numpy.dtype(numpy.int32))\n _lsubc_np_tmp[:] = lsubc_\n assert _lsubc_np_tmp.flags.contiguous\n _lsubc_tmp = ctypes.cast(_lsubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _lsubc_copyarray = False\n _lsubc_tmp = None\n \n _lvalc_minlength = (lensubnval_)\n if (lensubnval_) > 0 and lvalc_ is not None and len(lvalc_) != (lensubnval_):\n raise ValueError(\"Array argument lvalc is not long enough: Is %d, expected %d\" % (len(lvalc_),(lensubnval_)))\n if lvalc_ is None:\n raise ValueError(\"Argument lvalc cannot be None\")\n if lvalc_ is None:\n raise ValueError(\"Argument lvalc may not be None\")\n if isinstance(lvalc_, numpy.ndarray) and lvalc_.dtype is numpy.dtype(numpy.float64) and lvalc_.flags.contiguous:\n _lvalc_copyarray = False\n _lvalc_tmp = ctypes.cast(lvalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif lvalc_ is not None:\n _lvalc_copyarray = True\n _lvalc_np_tmp = numpy.zeros(len(lvalc_),numpy.dtype(numpy.float64))\n _lvalc_np_tmp[:] = lvalc_\n assert _lvalc_np_tmp.flags.contiguous\n _lvalc_tmp = ctypes.cast(_lvalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _lvalc_copyarray = False\n _lvalc_tmp = None\n \n _b_minlength = (n_)\n if (n_) > 0 and b_ is not None and len(b_) != (n_):\n raise ValueError(\"Array argument b is not long enough: Is %d, expected %d\" % (len(b_),(n_)))\n if isinstance(b_,numpy.ndarray) and not b_.flags.writeable:\n raise ValueError(\"Argument b must be writable\")\n if b_ is None:\n raise ValueError(\"Argument b may not be None\")\n if isinstance(b_, numpy.ndarray) and b_.dtype is numpy.dtype(numpy.float64) and b_.flags.contiguous:\n _b_copyarray = False\n _b_tmp = ctypes.cast(b_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif b_ is not None:\n _b_copyarray = True\n _b_np_tmp = numpy.zeros(len(b_),numpy.dtype(numpy.float64))\n _b_np_tmp[:] = b_\n assert _b_np_tmp.flags.contiguous\n _b_tmp = ctypes.cast(_b_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _b_copyarray = False\n _b_tmp = None\n \n res = __library__.MSK_XX_sparsetriangularsolvedense(self.__nativep,transposed_,n_,_lnzc_tmp,_lptrc_tmp,lensubnval_,_lsubc_tmp,_lvalc_tmp,_b_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _b_copyarray:\n b_[:] = _b_np_tmp", "def run_PCA(self, sparse_matrix):\n\n pca_explained = np.cumsum(PCA().fit(sparse_matrix).explained_variance_ratio_)\n pca_explainedby = np.where(pca_explained>=0.9)[0][0]\n pca = PCA(n_components=pca_explainedby)\n pca.fit(sparse_matrix)\n \n today = datetime.date.today()\n filename = 'sparse_pca_model.pkl'\n joblib.dump(pca, filename)\n \n return pca.transform(sparse_matrix), pca", "def so_sparse(nspins):\n sigma_x = sparse.COO(np.array([[0, 1 / 2], [1 / 2, 0]]))\n sigma_y = sparse.COO(np.array([[0, -1j / 2], [1j / 
2, 0]]))\n sigma_z = sparse.COO(np.array([[1 / 2, 0], [0, -1 / 2]]))\n unit = sparse.COO(np.array([[1, 0], [0, 1]]))\n\n L = np.empty((3, nspins, 2 ** nspins, 2 ** nspins), dtype=np.complex128) # consider other dtype?\n # Lxs = []\n # Lys = []\n # Lzs = []\n\n for n in range(nspins):\n Lx_current = 1\n Ly_current = 1\n Lz_current = 1\n\n for k in range(nspins):\n if k == n:\n Lx_current = sparse.kron(Lx_current, sigma_x)\n Ly_current = sparse.kron(Ly_current, sigma_y)\n Lz_current = sparse.kron(Lz_current, sigma_z)\n else:\n Lx_current = sparse.kron(Lx_current, unit)\n Ly_current = sparse.kron(Ly_current, unit)\n Lz_current = sparse.kron(Lz_current, unit)\n\n # Lxs[n] = Lx_current\n # Lys[n] = Ly_current\n # Lzs[n] = Lz_current\n # print(Lx_current.todense())\n L[0][n] = Lx_current.todense()\n L[1][n] = Ly_current.todense()\n L[2][n] = Lz_current.todense()\n Lz_sparse = sparse.COO(L[2])\n L_T = L.transpose(1, 0, 2, 3)\n L_sparse = sparse.COO(L)\n L_T_sparse = sparse.COO(L_T)\n Lproduct = sparse.tensordot(L_T_sparse, L_sparse, axes=((1, 3), (0, 2))).swapaxes(1, 2)\n # Lz_sparse = sparse.COO(L[2])\n Lproduct_sparse = sparse.COO(Lproduct)\n\n return Lz_sparse, Lproduct_sparse", "def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b, sym_pos=True)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)\n else:\n raise NotImplementedError", "def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n 
jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}" ]
[ "0.7571543", "0.5584805", "0.5499678", "0.5440372", "0.53172606", "0.52564216", "0.52468324", "0.52155113", "0.51478595", "0.514581", "0.5113303", "0.51123667", "0.50142163", "0.5004762", "0.49805087", "0.4961134", "0.49372533", "0.49125624", "0.49123773", "0.48156536", "0.47994202", "0.47681895", "0.47626644", "0.47619405", "0.47579244", "0.4741608", "0.47389966", "0.47219026", "0.4714131", "0.47114325" ]
0.7723914
0
Solves a sparse triangular system of linear equations. sparsetriangularsolvedense(self,transposed_,lnzc_,lptrc_,lsubc_,lvalc_,b_)
def sparsetriangularsolvedense(self,transposed_,lnzc_,lptrc_,lsubc_,lvalc_,b_): n_ = None if n_ is None: n_ = len(b_) elif n_ != len(b_): raise IndexError("Inconsistent length of array b") if n_ is None: n_ = len(lnzc_) elif n_ != len(lnzc_): raise IndexError("Inconsistent length of array lnzc") if n_ is None: n_ = len(lptrc_) elif n_ != len(lptrc_): raise IndexError("Inconsistent length of array lptrc") _lnzc_minlength = (n_) if (n_) > 0 and lnzc_ is not None and len(lnzc_) != (n_): raise ValueError("Array argument lnzc is not long enough: Is %d, expected %d" % (len(lnzc_),(n_))) if lnzc_ is None: raise ValueError("Argument lnzc cannot be None") if lnzc_ is None: raise ValueError("Argument lnzc may not be None") if isinstance(lnzc_, numpy.ndarray) and lnzc_.dtype is numpy.dtype(numpy.int32) and lnzc_.flags.contiguous: _lnzc_copyarray = False _lnzc_tmp = ctypes.cast(lnzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif lnzc_ is not None: _lnzc_copyarray = True _lnzc_np_tmp = numpy.zeros(len(lnzc_),numpy.dtype(numpy.int32)) _lnzc_np_tmp[:] = lnzc_ assert _lnzc_np_tmp.flags.contiguous _lnzc_tmp = ctypes.cast(_lnzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _lnzc_copyarray = False _lnzc_tmp = None _lptrc_minlength = (n_) if (n_) > 0 and lptrc_ is not None and len(lptrc_) != (n_): raise ValueError("Array argument lptrc is not long enough: Is %d, expected %d" % (len(lptrc_),(n_))) if lptrc_ is None: raise ValueError("Argument lptrc cannot be None") if lptrc_ is None: raise ValueError("Argument lptrc may not be None") if isinstance(lptrc_, numpy.ndarray) and lptrc_.dtype is numpy.dtype(numpy.int64) and lptrc_.flags.contiguous: _lptrc_copyarray = False _lptrc_tmp = ctypes.cast(lptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif lptrc_ is not None: _lptrc_copyarray = True _lptrc_np_tmp = numpy.zeros(len(lptrc_),numpy.dtype(numpy.int64)) _lptrc_np_tmp[:] = lptrc_ assert _lptrc_np_tmp.flags.contiguous _lptrc_tmp = ctypes.cast(_lptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _lptrc_copyarray = False _lptrc_tmp = None lensubnval_ = None if lensubnval_ is None: lensubnval_ = len(lsubc_) elif lensubnval_ != len(lsubc_): raise IndexError("Inconsistent length of array lsubc") if lensubnval_ is None: lensubnval_ = len(lvalc_) elif lensubnval_ != len(lvalc_): raise IndexError("Inconsistent length of array lvalc") _lsubc_minlength = (lensubnval_) if (lensubnval_) > 0 and lsubc_ is not None and len(lsubc_) != (lensubnval_): raise ValueError("Array argument lsubc is not long enough: Is %d, expected %d" % (len(lsubc_),(lensubnval_))) if lsubc_ is None: raise ValueError("Argument lsubc cannot be None") if lsubc_ is None: raise ValueError("Argument lsubc may not be None") if isinstance(lsubc_, numpy.ndarray) and lsubc_.dtype is numpy.dtype(numpy.int32) and lsubc_.flags.contiguous: _lsubc_copyarray = False _lsubc_tmp = ctypes.cast(lsubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif lsubc_ is not None: _lsubc_copyarray = True _lsubc_np_tmp = numpy.zeros(len(lsubc_),numpy.dtype(numpy.int32)) _lsubc_np_tmp[:] = lsubc_ assert _lsubc_np_tmp.flags.contiguous _lsubc_tmp = ctypes.cast(_lsubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _lsubc_copyarray = False _lsubc_tmp = None _lvalc_minlength = (lensubnval_) if (lensubnval_) > 0 and lvalc_ is not None and len(lvalc_) != (lensubnval_): raise ValueError("Array argument lvalc is not long enough: Is %d, expected %d" % (len(lvalc_),(lensubnval_))) if lvalc_ is None: 
raise ValueError("Argument lvalc cannot be None") if lvalc_ is None: raise ValueError("Argument lvalc may not be None") if isinstance(lvalc_, numpy.ndarray) and lvalc_.dtype is numpy.dtype(numpy.float64) and lvalc_.flags.contiguous: _lvalc_copyarray = False _lvalc_tmp = ctypes.cast(lvalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif lvalc_ is not None: _lvalc_copyarray = True _lvalc_np_tmp = numpy.zeros(len(lvalc_),numpy.dtype(numpy.float64)) _lvalc_np_tmp[:] = lvalc_ assert _lvalc_np_tmp.flags.contiguous _lvalc_tmp = ctypes.cast(_lvalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _lvalc_copyarray = False _lvalc_tmp = None _b_minlength = (n_) if (n_) > 0 and b_ is not None and len(b_) != (n_): raise ValueError("Array argument b is not long enough: Is %d, expected %d" % (len(b_),(n_))) if isinstance(b_,numpy.ndarray) and not b_.flags.writeable: raise ValueError("Argument b must be writable") if b_ is None: raise ValueError("Argument b may not be None") if isinstance(b_, numpy.ndarray) and b_.dtype is numpy.dtype(numpy.float64) and b_.flags.contiguous: _b_copyarray = False _b_tmp = ctypes.cast(b_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif b_ is not None: _b_copyarray = True _b_np_tmp = numpy.zeros(len(b_),numpy.dtype(numpy.float64)) _b_np_tmp[:] = b_ assert _b_np_tmp.flags.contiguous _b_tmp = ctypes.cast(_b_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _b_copyarray = False _b_tmp = None res = __library__.MSK_XX_sparsetriangularsolvedense(self.__nativep,transposed_,n_,_lnzc_tmp,_lptrc_tmp,lensubnval_,_lsubc_tmp,_lvalc_tmp,_b_tmp) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) if _b_copyarray: b_[:] = _b_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sparsetriangularsolvedense(self,transposed_,lnzc,lptrc,lsubc,lvalc,b): # 3\n if not isinstance(transposed_,transpose): raise TypeError(\"Argument transposed has wrong type\")\n n_ = None\n if n_ is None:\n n_ = len(b)\n elif n_ != len(b):\n raise IndexError(\"Inconsistent length of array b\")\n if n_ is None:\n n_ = len(lnzc)\n elif n_ != len(lnzc):\n raise IndexError(\"Inconsistent length of array lnzc\")\n if n_ is None:\n n_ = len(lptrc)\n elif n_ != len(lptrc):\n raise IndexError(\"Inconsistent length of array lptrc\")\n if n_ is None: n_ = 0\n if lnzc is None: raise TypeError(\"Invalid type for argument lnzc\")\n if lnzc is None:\n lnzc_ = None\n else:\n try:\n lnzc_ = memoryview(lnzc)\n except TypeError:\n try:\n _tmparr_lnzc = array.array(\"i\",lnzc)\n except TypeError:\n raise TypeError(\"Argument lnzc has wrong type\")\n else:\n lnzc_ = memoryview(_tmparr_lnzc)\n \n else:\n if lnzc_.format != \"i\":\n lnzc_ = memoryview(array.array(\"i\",lnzc))\n \n if lnzc_ is not None and len(lnzc_) != (n_):\n raise ValueError(\"Array argument lnzc has wrong length\")\n if lptrc is None: raise TypeError(\"Invalid type for argument lptrc\")\n if lptrc is None:\n lptrc_ = None\n else:\n try:\n lptrc_ = memoryview(lptrc)\n except TypeError:\n try:\n _tmparr_lptrc = array.array(\"q\",lptrc)\n except TypeError:\n raise TypeError(\"Argument lptrc has wrong type\")\n else:\n lptrc_ = memoryview(_tmparr_lptrc)\n \n else:\n if lptrc_.format != \"q\":\n lptrc_ = memoryview(array.array(\"q\",lptrc))\n \n if lptrc_ is not None and len(lptrc_) != (n_):\n raise ValueError(\"Array argument lptrc has wrong length\")\n lensubnval_ = None\n if lensubnval_ is None:\n lensubnval_ = len(lsubc)\n elif lensubnval_ != len(lsubc):\n raise IndexError(\"Inconsistent length of array lsubc\")\n if lensubnval_ is None:\n lensubnval_ = len(lvalc)\n elif lensubnval_ != len(lvalc):\n raise IndexError(\"Inconsistent length of array lvalc\")\n if lensubnval_ is None: lensubnval_ = 0\n if lsubc is None: raise TypeError(\"Invalid type for argument lsubc\")\n if lsubc is None:\n lsubc_ = None\n else:\n try:\n lsubc_ = memoryview(lsubc)\n except TypeError:\n try:\n _tmparr_lsubc = array.array(\"i\",lsubc)\n except TypeError:\n raise TypeError(\"Argument lsubc has wrong type\")\n else:\n lsubc_ = memoryview(_tmparr_lsubc)\n \n else:\n if lsubc_.format != \"i\":\n lsubc_ = memoryview(array.array(\"i\",lsubc))\n \n if lsubc_ is not None and len(lsubc_) != (lensubnval_):\n raise ValueError(\"Array argument lsubc has wrong length\")\n if lvalc is None: raise TypeError(\"Invalid type for argument lvalc\")\n if lvalc is None:\n lvalc_ = None\n else:\n try:\n lvalc_ = memoryview(lvalc)\n except TypeError:\n try:\n _tmparr_lvalc = array.array(\"d\",lvalc)\n except TypeError:\n raise TypeError(\"Argument lvalc has wrong type\")\n else:\n lvalc_ = memoryview(_tmparr_lvalc)\n \n else:\n if lvalc_.format != \"d\":\n lvalc_ = memoryview(array.array(\"d\",lvalc))\n \n if lvalc_ is not None and len(lvalc_) != (lensubnval_):\n raise ValueError(\"Array argument lvalc has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n _copyback_b = False\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n _copyback_b = True\n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n _copyback_b = True\n if b_ is not None and len(b_) 
!= (n_):\n raise ValueError(\"Array argument b has wrong length\")\n res = self.__obj.sparsetriangularsolvedense(transposed_,n_,lnzc_,lptrc_,lensubnval_,lsubc_,lvalc_,b_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_b:\n b[:] = _tmparr_b", "def analyze_sensitivity_sparse_grid(sparse_grid,max_order=2):\n from pyapprox.multivariate_polynomials import \\\n define_poly_options_from_variable_transformation\n from pyapprox.adaptive_sparse_grid import \\\n convert_sparse_grid_to_polynomial_chaos_expansion\n pce_opts=define_poly_options_from_variable_transformation(\n sparse_grid.variable_transformation)\n pce = convert_sparse_grid_to_polynomial_chaos_expansion(\n sparse_grid,pce_opts)\n pce_main_effects,pce_total_effects=\\\n get_main_and_total_effect_indices_from_pce(\n pce.get_coefficients(),pce.get_indices())\n\n interaction_terms, pce_sobol_indices = get_sobol_indices(\n pce.get_coefficients(),pce.get_indices(),max_order=max_order)\n \n return SensivitityResult(\n {'main_effects':pce_main_effects,\n 'total_effects':pce_total_effects,\n 'sobol_indices':pce_sobol_indices,\n 'sobol_interaction_indices':interaction_terms,\n 'pce':pce})", "def set_DirichletSS_sparse(self):\n \n \n self.set_Dirichlet_vessel(self.inlet)\n\n\n self.tissue_consumption(self.Mt)\n \n #REINITIALISATION OF THE VECTOR OF TISSUE PHI!!!\n self.phi_t=np.zeros(len(self.phit))\n \n self.set_Dirichlet_north(0)\n self.set_Dirichlet_east(0)\n self.set_Dirichlet_west(0)\n \n self.A.eliminate_zeros()", "def TriangleForwardSub(L,b):\n C = solve(L,b)\n return C", "def so_sparse(nspins):\n sigma_x = sparse.COO(np.array([[0, 1 / 2], [1 / 2, 0]]))\n sigma_y = sparse.COO(np.array([[0, -1j / 2], [1j / 2, 0]]))\n sigma_z = sparse.COO(np.array([[1 / 2, 0], [0, -1 / 2]]))\n unit = sparse.COO(np.array([[1, 0], [0, 1]]))\n\n L = np.empty((3, nspins, 2 ** nspins, 2 ** nspins), dtype=np.complex128) # consider other dtype?\n # Lxs = []\n # Lys = []\n # Lzs = []\n\n for n in range(nspins):\n Lx_current = 1\n Ly_current = 1\n Lz_current = 1\n\n for k in range(nspins):\n if k == n:\n Lx_current = sparse.kron(Lx_current, sigma_x)\n Ly_current = sparse.kron(Ly_current, sigma_y)\n Lz_current = sparse.kron(Lz_current, sigma_z)\n else:\n Lx_current = sparse.kron(Lx_current, unit)\n Ly_current = sparse.kron(Ly_current, unit)\n Lz_current = sparse.kron(Lz_current, unit)\n\n # Lxs[n] = Lx_current\n # Lys[n] = Ly_current\n # Lzs[n] = Lz_current\n # print(Lx_current.todense())\n L[0][n] = Lx_current.todense()\n L[1][n] = Ly_current.todense()\n L[2][n] = Lz_current.todense()\n Lz_sparse = sparse.COO(L[2])\n L_T = L.transpose(1, 0, 2, 3)\n L_sparse = sparse.COO(L)\n L_T_sparse = sparse.COO(L_T)\n Lproduct = sparse.tensordot(L_T_sparse, L_sparse, axes=((1, 3), (0, 2))).swapaxes(1, 2)\n # Lz_sparse = sparse.COO(L[2])\n Lproduct_sparse = sparse.COO(Lproduct)\n\n return Lz_sparse, Lproduct_sparse", "def _triangulate(self,x):\n\n t = tr.triangulate({\"vertices\": x},\"-n\")\n tri = t[\"triangles\"]\n neighbours = t[\"neighbors\"]\n\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n\n three_b_cell_mask = b_cells[tri].sum(axis=1)==3\n tri = tri[~three_b_cell_mask]\n\n neigh_map = np.cumsum(~three_b_cell_mask)-1\n neigh_map[three_b_cell_mask] = -1\n neigh_map = np.concatenate((neigh_map,[-1]))\n\n neighbours = neighbours[~three_b_cell_mask]\n neighbours = neigh_map[neighbours]\n\n #6. Store outputs\n self.tris = tri\n self.n_v = tri.shape[0]\n self.Cents = x[self.tris]\n self.vs = self.get_vertex()\n\n\n #7. Manually calculate the neighbours. 
See doc_string for conventions.\n self.v_neighbours = neighbours\n self.neighbours = self.vs[neighbours]\n self.neighbours[neighbours == -1] = np.nan\n\n self.reset_k2s()", "def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]", "def solve_triangular(a, b, lower=False):\n # TODO maybe commit this to gvar.linalg\n # TODO can I raise a LinAlgError if a[i,i] is 0, and still return the\n # result and have it assigned to a variable using try...finally inside this\n # function?\n x = np.copy(b)\n a = a.reshape(a.shape + (1,) * len(x.shape[1:]))\n if lower:\n x[0] /= a[0, 0]\n for i in range(1, len(x)):\n x[i:] -= x[i - 1] * a[i:, i - 1]\n x[i] /= a[i, i]\n else:\n x[-1] /= a[-1, -1]\n for i in range(len(x) - 1, 0, -1):\n x[:i] -= x[i] * a[:i, i]\n x[i - 1] /= a[i - 1, i - 1]\n return x", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc_,aptrc_,asubc_,avalc_):\n n_ = None\n if n_ is None:\n n_ = len(anzc_)\n elif n_ != len(anzc_):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc_)\n elif n_ != len(aptrc_):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc cannot be None\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc may not be None\")\n if isinstance(anzc_, numpy.ndarray) and anzc_.dtype is numpy.dtype(numpy.int32) and anzc_.flags.contiguous:\n _anzc_copyarray = False\n _anzc_tmp = ctypes.cast(anzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif anzc_ is not None:\n _anzc_copyarray = True\n _anzc_np_tmp = numpy.zeros(len(anzc_),numpy.dtype(numpy.int32))\n _anzc_np_tmp[:] = anzc_\n assert _anzc_np_tmp.flags.contiguous\n _anzc_tmp = ctypes.cast(_anzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _anzc_copyarray = False\n _anzc_tmp = None\n \n if aptrc_ is None:\n raise ValueError(\"Argument aptrc cannot be None\")\n if aptrc_ is None:\n raise ValueError(\"Argument aptrc may not be None\")\n if isinstance(aptrc_, numpy.ndarray) and aptrc_.dtype is numpy.dtype(numpy.int64) and aptrc_.flags.contiguous:\n _aptrc_copyarray = False\n _aptrc_tmp = ctypes.cast(aptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif aptrc_ is not None:\n _aptrc_copyarray = True\n _aptrc_np_tmp = numpy.zeros(len(aptrc_),numpy.dtype(numpy.int64))\n _aptrc_np_tmp[:] = aptrc_\n assert _aptrc_np_tmp.flags.contiguous\n _aptrc_tmp = ctypes.cast(_aptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _aptrc_copyarray = False\n _aptrc_tmp = None\n \n if asubc_ is None:\n raise ValueError(\"Argument asubc cannot be None\")\n if asubc_ is None:\n raise ValueError(\"Argument asubc may not be None\")\n if isinstance(asubc_, numpy.ndarray) and asubc_.dtype is numpy.dtype(numpy.int32) and asubc_.flags.contiguous:\n _asubc_copyarray = False\n _asubc_tmp = ctypes.cast(asubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asubc_ is not None:\n _asubc_copyarray = True\n _asubc_np_tmp = numpy.zeros(len(asubc_),numpy.dtype(numpy.int32))\n _asubc_np_tmp[:] = asubc_\n assert _asubc_np_tmp.flags.contiguous\n _asubc_tmp = ctypes.cast(_asubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asubc_copyarray = False\n _asubc_tmp = None\n \n if avalc_ is None:\n raise ValueError(\"Argument avalc cannot be None\")\n if avalc_ is None:\n raise ValueError(\"Argument avalc may not 
be None\")\n if isinstance(avalc_, numpy.ndarray) and avalc_.dtype is numpy.dtype(numpy.float64) and avalc_.flags.contiguous:\n _avalc_copyarray = False\n _avalc_tmp = ctypes.cast(avalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif avalc_ is not None:\n _avalc_copyarray = True\n _avalc_np_tmp = numpy.zeros(len(avalc_),numpy.dtype(numpy.float64))\n _avalc_np_tmp[:] = avalc_\n assert _avalc_np_tmp.flags.contiguous\n _avalc_tmp = ctypes.cast(_avalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _avalc_copyarray = False\n _avalc_tmp = None\n \n perm_ptr = ctypes.POINTER(ctypes.c_int32)()\n diag_ptr = ctypes.POINTER(ctypes.c_double)()\n lnzc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lptrc_ptr = ctypes.POINTER(ctypes.c_int64)()\n lensubnval_ = ctypes.c_int64()\n lsubc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lvalc_ptr = ctypes.POINTER(ctypes.c_double)()\n res = __library__.MSK_XX_computesparsecholesky(self.__nativep,multithread_,ordermethod_,tolsingular_,n_,_anzc_tmp,_aptrc_tmp,_asubc_tmp,_avalc_tmp,ctypes.byref(perm_ptr),ctypes.byref(diag_ptr),ctypes.byref(lnzc_ptr),ctypes.byref(lptrc_ptr),ctypes.byref(lensubnval_),ctypes.byref(lsubc_ptr),ctypes.byref(lvalc_ptr))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n perm_arr = perm_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,perm_ptr)\n diag_arr = diag_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,diag_ptr)\n lnzc_arr = lnzc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lnzc_ptr)\n lptrc_arr = lptrc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lptrc_ptr)\n lensubnval_ = lensubnval_.value\n _lensubnval_return_value = lensubnval_\n lsubc_arr = lsubc_ptr[0:lensubnval_]\n __library__.MSK_XX_freeenv(self.__nativep,lsubc_ptr)\n lvalc_arr = lvalc_ptr[0:lensubnval_]\n __library__.MSK_XX_freeenv(self.__nativep,lvalc_ptr)\n return (perm_arr,diag_arr,lnzc_arr,lptrc_arr,_lensubnval_return_value,lsubc_arr,lvalc_arr)", "def analyticalLinearSol(self, t):\n return self.c*t + self.I", "def test_solve_quadratic_fixed(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.val = 4\n iden2.x.val = 5\n iden3.x.val = 6\n iden1.x.name = 'x1'\n iden2.x.name = 'x2'\n iden3.x.name = 'x3'\n iden2.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 10)\n self.assertAlmostEqual(iden1.x.val, 4)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 6)", "def linear_regression(d, ind, dep):\n\n\ty=d.get_data([dep])\n\tprint \"y :\",y\n\tA=d.get_data(ind)\n\tprint \"A :\",A\n\tones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()\n\tA=np.concatenate((A, ones), axis=1)\n\tprint \"concatenated A :\",A\n\tAAinv=np.linalg.inv( np.dot(A.transpose(), A))\n\tprint \"AAinv: \\n\",AAinv\n\t\"\"\"\n\tprint \"A :\",A\n\tprint \"y: \",y\n\tprint \"AAinv: \",AAinv\"\"\"\n\tprint \"shape A:\t \",A.shape\n\tprint \"shape y\t:\", y.shape\n\tx=np.linalg.lstsq(A,y)\n\tprint \"x :\\n\",x\n\tb=x[0]\n\tprint \"\\n b : \\n\",b\n\tN=len(y)\n\tprint \"N :\t\\n\",N\n\tC=len(b)\n\tprint \"C :\t \",C\n\tdf_e=N-C\n\tdf_r=C-1\n\terror=y - np.dot(A, b)\n\tprint \"error:\t\",error\n\tsse=np.dot(error.transpose(), error) / df_e\n\tprint \"sse\t:\",sse\n\tstderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )\n\tprint \"stderr: \",stderr\n\tt = 
b.transpose() / stderr\n\tprint \"t :\", t\n\tp=2*(1 - scipy.stats.t.cdf(abs(t), df_e))\n\tprint \"p:\t\",p\n\tr2=1 - error.var() / y.var()\n\tprint \"R^2\t :\",r2, \"\\n \\n \\n \\n*************************************\"\n\t\n\t\n\treturn [b,sse,r2,t,p]", "def test_solve_lsap_with_removed_row():\n num_rows = 10\n num_cols = 500\n num_rounds = 100\n\n for i in range(num_rounds):\n # Note that here we set all costs to integer values, which might\n # lead to existence of multiple solutions.\n cost_matrix = np.random.randint(10, size=(num_rows, num_cols))\n cost_matrix = cost_matrix.astype(np.double)\n\n removed_row = random.randint(0, num_rows - 1)\n row_idx_1, col_idx_1 = linear_sum_assignment(cost_matrix)\n\n # Get the submatrix with the removed row\n sub_cost_matrix = cost_matrix[~one_hot(removed_row, num_rows), :]\n sub_row_idx_1, sub_col_idx_1 = linear_sum_assignment(sub_cost_matrix)\n\n # Solve the problem with dynamic algorithm\n row4col, col4row, u, v = lap._solve(cost_matrix)\n assert (\n np.array_equal(col_idx_1, col4row)\n or cost_matrix[row_idx_1, col_idx_1].sum()\n == cost_matrix[row_idx_1, col4row].sum()\n )\n\n lap.solve_lsap_with_removed_row(cost_matrix, removed_row, row4col, col4row, v)\n assert (\n np.array_equal(sub_col_idx_1, col4row[~one_hot(removed_row, num_rows)])\n or sub_cost_matrix[sub_row_idx_1, sub_col_idx_1].sum()\n == cost_matrix[\n ~one_hot(removed_row, num_rows),\n col4row[~one_hot(removed_row, num_rows)],\n ].sum()\n )", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / 
(self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def test_solve_quadratic(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.fixed = False\n iden2.x.fixed = False\n iden3.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 0)\n self.assertAlmostEqual(iden1.x.val, 1)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 3)", "def cp_sparse(tensor, rank, penalties, nonneg=False, init=None, warmstart=True,\n tol=1e-6, min_time=0, max_time=np.inf, n_iter_max=1000, print_every=0.3,\n prepend_print='\\r', append_print=''):\n\n # default initialization method\n if init is None:\n init = 'randn' if nonneg is False else 'rand'\n\n # initialize factors\n if warmstart:\n factors, _ = cp_als(tensor, rank, nonneg=nonneg, tol=tol)\n else:\n factors = _cp_initialize(tensor, rank, init)\n\n def _compute_penalty(_factors):\n return np.sum([lam*np.sum(np.abs(f)) for lam, f in zip(penalties, _factors)])\n\n # setup optimization\n converged = False\n norm_tensor = tensorly.tenalg.norm(tensor, 2)\n t_elapsed = [0]\n obj_history = [_compute_squared_recon_error(tensor, factors, norm_tensor) + _compute_penalty(factors)]\n\n # initial print statement\n verbose = print_every > 0\n print_counter = 0 # time to print next progress\n if verbose:\n print(prepend_print+'iter=0, error={0:.4f}'.format(obj_history[-1]), end=append_print)\n\n # gradient descent params\n linesearch_iters = 
100\n\n # main loop\n t0 = time()\n for iteration in range(n_iter_max):\n\n # alternating optimization over modes\n for mode in range(tensor.ndim):\n # current optimization state\n stepsize = 1.0\n old_obj = obj_history[-1]\n fctr = factors[mode].copy()\n\n # keep track of positive and negative elements\n if not nonneg:\n pos = fctr > 0\n neg = fctr < 0\n\n # form unfolding and khatri-rao product\n unf = unfold(tensor, mode)\n kr = khatri_rao(factors, skip_matrix=mode)\n\n # calculate gradient\n kr_t_kr = np.dot(kr.T, kr)\n gradient = np.dot(fctr, kr_t_kr) - np.dot(unf, kr)\n\n # proximal gradient update\n new_obj = np.inf\n\n for liter in range(linesearch_iters):\n # take gradient step\n new_fctr = fctr - stepsize*gradient\n\n # iterative soft-thresholding\n if nonneg:\n new_fctr -= stepsize*penalties[mode]\n new_fctr[new_fctr<0] = 0.0\n else:\n new_fctr[pos] -= stepsize*penalties[mode]\n new_fctr[neg] += stepsize*penalties[mode]\n sign_changes = (new_factor > 0 & neg) | (new_factor < 0 & pos)\n new_fctr[sign_changes] = 0.0\n\n # calculate new error\n factors[mode] = new_fctr\n new_obj = _compute_squared_recon_error(tensor, factors, norm_tensor) + _compute_penalty(factors)\n\n # break if error went down\n if new_obj < old_obj:\n factors[mode] = new_fctr\n break\n # decrease step size if error went up\n else:\n stepsize /= 2.0\n # give up if too many iterations\n if liter == (linesearch_iters - 1):\n factors[mode] = fctr\n new_obj = old_obj\n\n # renormalize factors\n factors = standardize_factors(factors, sort_factors=False)\n\n # check convergence\n t_elapsed.append(time() - t0)\n obj_history.append(new_obj)\n\n # break loop if converged\n converged = abs(obj_history[-2] - obj_history[-1]) < tol\n if converged and (time()-t0)>min_time:\n if verbose: print(prepend_print+'converged in {} iterations.'.format(iteration+1), end=append_print)\n break\n\n # display progress\n if verbose and (time()-t0)/print_every > print_counter:\n print_str = 'iter={0:d}, error={1:.4f}, variation={2:.4f}'.format(\n iteration+1, obj_history[-1], obj_history[-2] - obj_history[-1])\n print(prepend_print+print_str, end=append_print)\n print_counter += print_every\n\n # stop early if over time\n if (time()-t0)>max_time:\n break\n\n if not converged and verbose:\n print('gave up after {} iterations and {} seconds'.format(iteration, time()-t0), end=append_print)\n\n # return optimized factors and info\n return factors, { 'err_hist' : obj_history,\n 't_hist' : t_elapsed,\n 'err_final' : obj_history[-1],\n 'converged' : converged,\n 'iterations' : len(obj_history) }", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc,aptrc,asubc,avalc): # 3\n n_ = None\n if n_ is None:\n n_ = len(anzc)\n elif n_ != len(anzc):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc)\n elif n_ != len(aptrc):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if n_ is None: n_ = 0\n if anzc is None: raise TypeError(\"Invalid type for argument anzc\")\n if anzc is None:\n anzc_ = None\n else:\n try:\n anzc_ = memoryview(anzc)\n except TypeError:\n try:\n _tmparr_anzc = array.array(\"i\",anzc)\n except TypeError:\n raise TypeError(\"Argument anzc has wrong type\")\n else:\n anzc_ = memoryview(_tmparr_anzc)\n \n else:\n if anzc_.format != \"i\":\n anzc_ = memoryview(array.array(\"i\",anzc))\n \n if aptrc is None: raise TypeError(\"Invalid type for argument aptrc\")\n if aptrc is None:\n aptrc_ = None\n else:\n try:\n aptrc_ = memoryview(aptrc)\n except TypeError:\n 
try:\n _tmparr_aptrc = array.array(\"q\",aptrc)\n except TypeError:\n raise TypeError(\"Argument aptrc has wrong type\")\n else:\n aptrc_ = memoryview(_tmparr_aptrc)\n \n else:\n if aptrc_.format != \"q\":\n aptrc_ = memoryview(array.array(\"q\",aptrc))\n \n if asubc is None: raise TypeError(\"Invalid type for argument asubc\")\n if asubc is None:\n asubc_ = None\n else:\n try:\n asubc_ = memoryview(asubc)\n except TypeError:\n try:\n _tmparr_asubc = array.array(\"i\",asubc)\n except TypeError:\n raise TypeError(\"Argument asubc has wrong type\")\n else:\n asubc_ = memoryview(_tmparr_asubc)\n \n else:\n if asubc_.format != \"i\":\n asubc_ = memoryview(array.array(\"i\",asubc))\n \n if avalc is None: raise TypeError(\"Invalid type for argument avalc\")\n if avalc is None:\n avalc_ = None\n else:\n try:\n avalc_ = memoryview(avalc)\n except TypeError:\n try:\n _tmparr_avalc = array.array(\"d\",avalc)\n except TypeError:\n raise TypeError(\"Argument avalc has wrong type\")\n else:\n avalc_ = memoryview(_tmparr_avalc)\n \n else:\n if avalc_.format != \"d\":\n avalc_ = memoryview(array.array(\"d\",avalc))\n \n res,resargs = self.__obj.computesparsecholesky(multithread_,ordermethod_,tolsingular_,n_,anzc_,aptrc_,asubc_,avalc_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc = resargs\n return _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc", "def _lin_solve(b, x, x0, a, c, iterations, n):\n c_recip = 1 / c\n for k in range(0, iterations):\n for m in range(1, n - 1):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, m, n)] = (x0[index_of(i, j, m, n)] + a * (x[index_of(i + 1, j, m, n)]\n + x[index_of(i - 1, j, m, n)]\n + x[index_of(i, j + 1, m, n)]\n + x[index_of(i, j - 1, m, n)]\n + x[index_of(i, j, m + 1, n)]\n + x[index_of(i, j, m - 1, n)]\n )) * c_recip\n _set_bounds(b, x, n)", "def precondition_sparse_matrix(A: lil_matrix) -> linalg.LinearOperator:\n ilu = linalg.spilu(A)\n Mx = ilu.solve\n return linalg.LinearOperator(A.shape, Mx)", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count 
+= 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def solve(raster):\n cells_changed = True\n while cells_changed:\n cells_changed = False\n for meta in raster.row_meta:\n mask = raster.get_row(meta.idx)\n orig_meta = copy.deepcopy(meta)\n\n linesolve(mask, meta)\n\n if raster.update_row(mask=mask, idx=meta.idx) or meta != orig_meta:\n cells_changed = True\n\n for meta in raster.col_meta:\n mask = raster.get_col(meta.idx)\n orig_meta = copy.deepcopy(meta)\n\n linesolve(mask, meta)\n\n if raster.update_col(mask=mask, idx=meta.idx) or meta != orig_meta:\n cells_changed = True\n\n if raster.is_solved():\n return Solution(raster.table)\n\n return None", "def calculate_littrow_sol(p, loc, ll, iteration=0, log=False):\n func_name = __NAME__ + '.calculate_littrow_sol()'\n # get parameters from p\n remove_orders = p['IC_LITTROW_REMOVE_ORDERS']\n # TODO: Fudge factor - Melissa will fix this :)\n n_order_init = p['IC_LITTROW_ORDER_INIT_{0}'.format(1)]\n n_order_final = p['IC_HC_N_ORD_FINAL']\n n_order_start = p['IC_HC_N_ORD_START']\n x_cut_step = p['IC_LITTROW_CUT_STEP_{0}'.format(iteration)]\n fit_degree = p['IC_LITTROW_FIT_DEG_{0}'.format(iteration)]\n # get parameters from loc\n torder = loc['ECHELLE_ORDERS']\n ll_out = ll\n # test if n_order_init is in remove_orders\n if n_order_init in remove_orders:\n # TODO: Fudge factor - Melissa will fix this\n wargs = ['IC_LITTROW_ORDER_INIT_{0}'.format(1),\n p['IC_LITTROW_ORDER_INIT_{0}'.format(1)],\n \"IC_LITTROW_REMOVE_ORDERS\"]\n wmsg1 = 'Warning {0}={1} in {2}'.format(*wargs)\n wmsg2 = ' Please check constants file'\n wmsg3 = ' function = {0}'.format(func_name)\n WLOG(p, 'error', [wmsg1, wmsg2, wmsg3])\n # test if n_order_init is in remove_orders\n if n_order_final in remove_orders:\n wargs = [\"IC_HC_N_ORD_FINAL\", p['IC_HC_N_ORD_FINAL'],\n \"IC_LITTROW_REMOVE_ORDERS\"]\n wmsg1 = 'Warning {0}={1} in {2}'.format(*wargs)\n wmsg2 = ' Please check constants file'\n wmsg3 = ' function = {0}'.format(func_name)\n WLOG(p, 'error', [wmsg1, wmsg2, wmsg3])\n # check that all remove orders exist\n for remove_order in remove_orders:\n if remove_order not in np.arange(n_order_final):\n wargs1 = [remove_order, 'IC_LITTROW_REMOVE_ORDERS', n_order_init,\n n_order_final]\n wmsg1 = (' Invalid order number={0} in {1} must be between'\n '{2} and {3}'.format(*wargs1))\n wmsg2 = ' function = {0}'.format(func_name)\n WLOG(p, 'error', [wmsg1, wmsg2])\n\n # check to make sure we have some orders left\n if len(np.unique(remove_orders)) == n_order_final - n_order_start:\n wmsg = 'Cannot remove all orders. 
Check IC_LITTROW_REMOVE_ORDERS'\n WLOG(p, 'error', wmsg)\n # get the total number of orders to fit\n num_orders = len(loc['ECHELLE_ORDERS'])\n # get the dimensions of the data\n ydim, xdim = loc['HCDATA'].shape\n # deal with removing orders (via weighting stats)\n rmask = np.ones(num_orders, dtype=bool)\n if len(remove_orders) > 0:\n rmask[np.array(remove_orders)] = False\n # storage of results\n keys = ['LITTROW_MEAN', 'LITTROW_SIG', 'LITTROW_MINDEV',\n 'LITTROW_MAXDEV', 'LITTROW_PARAM', 'LITTROW_XX', 'LITTROW_YY',\n 'LITTROW_INVORD', 'LITTROW_FRACLL', 'LITTROW_PARAM0',\n 'LITTROW_MINDEVORD', 'LITTROW_MAXDEVORD']\n for key in keys:\n nkey = key + '_{0}'.format(iteration)\n loc[nkey] = []\n loc.set_source(nkey, func_name)\n # construct the Littrow cut points\n x_cut_points = np.arange(x_cut_step, xdim-x_cut_step, x_cut_step)\n # save to storage\n loc['X_CUT_POINTS_{0}'.format(iteration)] = x_cut_points\n # get the echelle order values\n # TODO check if mask needs resizing\n orderpos = torder[rmask]\n # get the inverse order number\n inv_orderpos = 1.0 / orderpos\n # loop around cut points and get littrow parameters and stats\n for it in range(len(x_cut_points)):\n # this iterations x cut point\n x_cut_point = x_cut_points[it]\n # get the fractional wavelength contrib. at each x cut point\n ll_point = ll_out[:, x_cut_point][rmask]\n ll_start_point = ll_out[n_order_init, x_cut_point]\n frac_ll_point = ll_point/ll_start_point\n # fit the inverse order numbers against the fractional\n # wavelength contrib.\n coeffs = nanpolyfit(inv_orderpos, frac_ll_point, fit_degree)[::-1]\n coeffs0 = nanpolyfit(inv_orderpos, frac_ll_point, fit_degree)[::-1]\n # calculate the fit values\n cfit = np.polyval(coeffs[::-1], inv_orderpos)\n # calculate the residuals\n res = cfit - frac_ll_point\n # find the largest residual\n largest = np.max(abs(res))\n sigmaclip = abs(res) != largest\n # remove the largest residual\n inv_orderpos_s = inv_orderpos[sigmaclip]\n frac_ll_point_s = frac_ll_point[sigmaclip]\n # refit the inverse order numbers against the fractional\n # wavelength contrib. 
after sigma clip\n coeffs = nanpolyfit(inv_orderpos_s, frac_ll_point_s, fit_degree)[::-1]\n # calculate the fit values (for all values - including sigma clipped)\n cfit = np.polyval(coeffs[::-1], inv_orderpos)\n # calculate residuals (in km/s) between fit and original values\n respix = speed_of_light * (cfit - frac_ll_point)/frac_ll_point\n # calculate stats\n mean = np.nansum(respix) / len(respix)\n mean2 = np.nansum(respix ** 2) / len(respix)\n rms = np.sqrt(mean2 - mean ** 2)\n mindev = np.min(respix)\n maxdev = np.max(respix)\n mindev_ord = np.argmin(respix)\n maxdev_ord = np.argmax(respix)\n # add to storage\n loc['LITTROW_INVORD_{0}'.format(iteration)].append(inv_orderpos)\n loc['LITTROW_FRACLL_{0}'.format(iteration)].append(frac_ll_point)\n loc['LITTROW_MEAN_{0}'.format(iteration)].append(mean)\n loc['LITTROW_SIG_{0}'.format(iteration)].append(rms)\n loc['LITTROW_MINDEV_{0}'.format(iteration)].append(mindev)\n loc['LITTROW_MAXDEV_{0}'.format(iteration)].append(maxdev)\n loc['LITTROW_MINDEVORD_{0}'.format(iteration)].append(mindev_ord)\n loc['LITTROW_MAXDEVORD_{0}'.format(iteration)].append(maxdev_ord)\n loc['LITTROW_PARAM_{0}'.format(iteration)].append(coeffs)\n loc['LITTROW_PARAM0_{0}'.format(iteration)].append(coeffs0)\n loc['LITTROW_XX_{0}'.format(iteration)].append(orderpos)\n loc['LITTROW_YY_{0}'.format(iteration)].append(respix)\n # if log then log output\n if log:\n emsg1 = 'Littrow check at X={0}'.format(x_cut_point)\n eargs = [mean * 1000, rms * 1000, mindev * 1000, maxdev * 1000,\n mindev/rms, maxdev/rms]\n emsg2 = (' mean:{0:.3f}[m/s] rms:{1:.2f}[m/s] min/max:{2:.2f}/'\n '{3:.2f}[m/s] (frac:{4:.1f}/{5:.1f})'.format(*eargs))\n WLOG(p, '', [emsg1, emsg2])\n\n # return loc\n return loc", "def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = 
DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode = 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)", "def jacobian_cdas( func, scl, lint=0.8, tol=1e-12, eps = 1e-30, withScl = False ):\n scl = 
abs(asarray(scl).flatten())\n N = len(scl)\n lint = abs(lint)\n def centDiffJacAutoScl( arg ):\n \"\"\"\n Algorithm: use the value of the function at the center point\n to test linearity of the function. Linearity is tested by\n taking dy+ and dy- for each dx, and ensuring that they\n satisfy lint<|dy+|/|dy-|<1/lint\n \"\"\"\n x0 = asarray(arg).flatten()\n y0 = func(x0)\n s = scl.copy()\n #print \"Jac at \",x0\n idx = slice(None)\n dyp = empty((len(s),len(y0)),x0.dtype)\n dyn = empty_like(dyp)\n while True:\n #print \"Jac iter \",s\n d0 = diag(s)\n dyp[idx,:] = [ func(x0+dx)-y0 for dx in d0[idx,:] ]\n dypc = dyp.conj()\n dyn[idx,:] = [ func(x0-dx)-y0 for dx in d0[idx,:] ]\n dync = dyn.conj()\n dp = sum(dyp * dypc,axis=1)\n dn = sum(dyn * dync,axis=1)\n nul = (dp == 0) | (dn == 0)\n if any(nul):\n s[nul] *= 1.5\n continue\n rat = dp/(dn+eps)\n nl = ((rat<lint) | (rat>(1.0/lint)))\n # If no linearity violations found --> done\n if ~any(nl):\n break\n # otherwise -- decrease steps\n idx, = nl.flatten().nonzero()\n s[idx] *= 0.75\n # Don't allow steps smaller than tol\n s[idx[s[idx]<tol]] = tol\n if all(s[idx]<tol):\n break\n res = ((dyp-dyn)/(2*s[:,newaxis])).T\n if withScl:\n return res, s\n return res\n return centDiffJacAutoScl", "def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):\n scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0,\n 'q': dim_dict['q'] if 'q' in dim_dict else [],\n 's': dim_dict['s'] if 's' in dim_dict else [],\n 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,\n 'ed': dim_dict['ed'] if 'ed' in dim_dict else 0,\n 'f': dim_dict['z'] if 'z' in dim_dict else 0}\n #print('scs_cones', scs_cones)\n sol = scs.solve({'A': A, 'b': b,\n 'c': c},\n cone=scs_cones,\n **kwargs)\n info = sol['info']\n\n if info['statusVal'] > 0:\n z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1., kappa=0.)\n\n if info['statusVal'] < 0:\n x = np.zeros_like(sol['x']) \\\n if np.any(np.isnan(sol['x'])) else sol['x']\n\n s = np.zeros_like(sol['s']) \\\n if np.any(np.isnan(sol['s'])) else sol['s']\n\n y = np.zeros_like(sol['y']) \\\n if np.any(np.isnan(sol['y'])) else sol['y']\n\n if np.allclose(y, 0.) and c@x < 0:\n obj = c@x\n # assert obj < 0\n x /= -obj\n s /= -obj\n # print('primal res:', np.linalg.norm(A@x + s))\n\n if np.allclose(s, 0.) 
and b@y < 0:\n obj = b@y\n # assert obj < 0\n y /= -obj\n # print('dual res:', np.linalg.norm(A.T@y))\n\n # print('SCS NONSOLVED')\n # print('x', x)\n # print('s', s)\n # print('y', y)\n\n z = xsy2z(x, s, y, tau=0., kappa=1.)\n\n return z, info", "def test_Schur_Sp_solve():\n mat_A = load_matrix_step_noslip()\n petsc_options = initialize_petsc_options()\n b, x = create_petsc_vecs(mat_A)\n\n solver_info = LS.ModelInfo('interlaced', 3)\n schur_approx = LS.Schur_Sp(mat_A,\n '',\n solver_info=solver_info)\n ksp_obj = initialize_schur_ksp_obj(mat_A, schur_approx)\n ksp_obj.solve(b,x)\n\n assert ksp_obj.converged == True\n assert ksp_obj.reason == 2\n assert float(ksp_obj.norm) < 1.0e-5\n assert ksp_obj.its == 63", "def check_matrix(solver,matrix):\n model_validated = True\n #To check that the negation of the given formula is UNSAT under the clauses \n #in the solver we check if the negation of each clause is UNSAT under these clauses.\n for clause in matrix:\n negated_claus=[-l for l in clause]\n model_validated = not solver.solve(negated_claus)\n if not model_validated:\n print(\"Falsified Clause: {}\".format(clause))\n return False\n return True", "def solve_lpt(engine, pt, aend, dlinear_k, s, v, s1, s2):\n code = CodeSegment(engine)\n code.solve_linear_displacement(source_k='dlinear_k', s=s1)\n code.generate_2nd_order_source(source_k='dlinear_k', source2_k='source2_k')\n code.solve_linear_displacement(source_k='source2_k', s=s2)\n\n code.bilinear(x1='s1', c1=pt.D1(aend),\n x2='s2', c2=pt.D2(aend),\n y=s)\n\n code.bilinear(x1='s1', c1=pt.f1(aend) * aend ** 2 * pt.E(aend) * pt.D1(aend),\n x2='s2', c2=pt.f2(aend) * aend ** 2 * pt.E(aend) * pt.D2(aend),\n y=v)\n return code", "def train_model(args, tr_sparse):\n tf.logging.info('Train Start: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # generate model\n input_tensor, row_factor, col_factor, model = wals.wals_model(tr_sparse,\n args.latent_factors,\n args.regularization,\n args.unobs_weight,\n args.weights,\n args.wt_type,\n args.feature_wt_exp,\n args.feature_wt_factor)\n \n # factorize matrix\n session = wals.simple_train(model, input_tensor, args.num_iters)\n \n tf.logging.info('Train Finish: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # evaluate output factor matrices\n output_row = row_factor.eval(session=session)\n output_col = col_factor.eval(session=session)\n \n # close the training session \n session.close()\n \n return output_row, output_col", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()" ]
[ "0.8438944", "0.54154617", "0.5376753", "0.53284895", "0.5311217", "0.52064824", "0.51942706", "0.51143026", "0.49586394", "0.4949625", "0.49304515", "0.49237528", "0.48860884", "0.4882467", "0.48812094", "0.48605898", "0.48446497", "0.4842036", "0.48256987", "0.48181957", "0.48092657", "0.47964278", "0.47785026", "0.47241294", "0.47109774", "0.46885413", "0.46807936", "0.4662071", "0.46619526", "0.46545362" ]
0.8483068
0
Computes all eigenvalues of a symmetric dense matrix. syeig(self,uplo_,n_,a_,w_)
def syeig(self,uplo_,n_,a_,w_): _a_minlength = ((n_) * (n_)) if ((n_) * (n_)) > 0 and a_ is not None and len(a_) != ((n_) * (n_)): raise ValueError("Array argument a is not long enough: Is %d, expected %d" % (len(a_),((n_) * (n_)))) if a_ is None: raise ValueError("Argument a cannot be None") if a_ is None: raise ValueError("Argument a may not be None") if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous: _a_copyarray = False _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif a_ is not None: _a_copyarray = True _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64)) _a_np_tmp[:] = a_ assert _a_np_tmp.flags.contiguous _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _a_copyarray = False _a_tmp = None _w_minlength = (n_) if (n_) > 0 and w_ is not None and len(w_) != (n_): raise ValueError("Array argument w is not long enough: Is %d, expected %d" % (len(w_),(n_))) if isinstance(w_,numpy.ndarray) and not w_.flags.writeable: raise ValueError("Argument w must be writable") if w_ is None: raise ValueError("Argument w may not be None") if isinstance(w_, numpy.ndarray) and w_.dtype is numpy.dtype(numpy.float64) and w_.flags.contiguous: _w_copyarray = False _w_tmp = ctypes.cast(w_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif w_ is not None: _w_copyarray = True _w_np_tmp = numpy.zeros(len(w_),numpy.dtype(numpy.float64)) _w_np_tmp[:] = w_ assert _w_np_tmp.flags.contiguous _w_tmp = ctypes.cast(_w_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _w_copyarray = False _w_tmp = None res = __library__.MSK_XX_syeig(self.__nativep,uplo_,n_,_a_tmp,_w_tmp) if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1]) if _w_copyarray: w_[:] = _w_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def syeig(self,uplo_,n_,a,w): # 3\n if not isinstance(uplo_,uplo): raise TypeError(\"Argument uplo has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (n_)):\n raise ValueError(\"Array argument a has wrong length\")\n if w is None: raise TypeError(\"Invalid type for argument w\")\n _copyback_w = False\n if w is None:\n w_ = None\n else:\n try:\n w_ = memoryview(w)\n except TypeError:\n try:\n _tmparr_w = array.array(\"d\",w)\n except TypeError:\n raise TypeError(\"Argument w has wrong type\")\n else:\n w_ = memoryview(_tmparr_w)\n _copyback_w = True\n else:\n if w_.format != \"d\":\n w_ = memoryview(array.array(\"d\",w))\n _copyback_w = True\n if w_ is not None and len(w_) != (n_):\n raise ValueError(\"Array argument w has wrong length\")\n res = self.__obj.syeig(uplo_,n_,a_,w_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_w:\n w[:] = _tmparr_w", "def syevd(self,uplo_,n_,a,w): # 3\n if not isinstance(uplo_,uplo): raise TypeError(\"Argument uplo has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n _copyback_a = False\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n _copyback_a = True\n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n _copyback_a = True\n if a_ is not None and len(a_) != ((n_) * (n_)):\n raise ValueError(\"Array argument a has wrong length\")\n if w is None: raise TypeError(\"Invalid type for argument w\")\n _copyback_w = False\n if w is None:\n w_ = None\n else:\n try:\n w_ = memoryview(w)\n except TypeError:\n try:\n _tmparr_w = array.array(\"d\",w)\n except TypeError:\n raise TypeError(\"Argument w has wrong type\")\n else:\n w_ = memoryview(_tmparr_w)\n _copyback_w = True\n else:\n if w_.format != \"d\":\n w_ = memoryview(array.array(\"d\",w))\n _copyback_w = True\n if w_ is not None and len(w_) != (n_):\n raise ValueError(\"Array argument w has wrong length\")\n res = self.__obj.syevd(uplo_,n_,a_,w_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_w:\n w[:] = _tmparr_w\n if _copyback_a:\n a[:] = _tmparr_a", "def _symmetric_compute_eigenvalues(S_elems):\n\n if len(S_elems) == 3: # Use fast Cython code for 2D\n eigs = cp.stack(_image_orthogonal_matrix22_eigvals(*S_elems))\n else:\n matrices = _symmetric_image(S_elems)\n # eigvalsh returns eigenvalues in increasing order. 
We want decreasing\n eigs = cp.linalg.eigvalsh(matrices)[..., ::-1]\n leading_axes = tuple(range(eigs.ndim - 1))\n eigs = cp.transpose(eigs, (eigs.ndim - 1,) + leading_axes)\n return eigs", "def hessian_matrix_eigvals(H_elems):\n return _symmetric_compute_eigenvalues(H_elems)", "def eigenvalues(self, expand=False, factor=False, simplify=False):\n raise NotImplementedError", "def matrix_eig(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n hermitian=False,\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n evenTrunc = False,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n # If chis is not specfied, there is no even truncation scheme; else, we\n # keep track of the chi we specfied\n if chis is None:\n evenTrunc = False\n else:\n try:\n chis = list(chis)\n except TypeError:\n chis = [chis]\n chiSpec = max(chis)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n assert self.charge == 0\n assert self.dirs[0] + self.dirs[1] == 0\n assert set(zip(self.qhape[0], self.shape[0])) == set(\n zip(self.qhape[1], self.shape[1])\n )\n\n S_dtype = np.float_ if hermitian else np.complex_\n U_dtype = self.dtype if hermitian else np.complex_\n\n # Eigenvalue decompose each sector at a time.\n # While doing so, also keep track of a list of all eigenvalues, as well\n # as a heap that gives the negative of the absolute value of the\n # largest eigenvalue in each sector. These will be needed later when\n # deciding how to truncate the eigenvalues.\n eigdecomps = {}\n dims = {}\n minusabs_next_eigs = []\n all_eigs = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n # This matrix is empty and trivial.\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=U_dtype)\n s = np.empty((m,), dtype=S_dtype)\n eigdecomp = (s, u)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n if hermitian:\n s, u = spsla.eighs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n s, u = spsla.eigs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n if hermitian:\n s, u = np.linalg.eigh(v)\n else:\n s, u = np.linalg.eig(v)\n order = np.argsort(-np.abs(s))\n s = s[order]\n u = u[:, order]\n s = s.astype(S_dtype)\n u = u.astype(U_dtype)\n eigdecomp = (s, u)\n eigdecomps[k] = eigdecomp\n dims[k] = 0\n all_eigs.append(s)\n if 0 not in s.shape:\n heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))\n try:\n all_eigs = np.concatenate(all_eigs)\n except ValueError:\n all_eigs = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_eigs,\n eigdecomps,\n minusabs_next_eigs,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # truncate in both sectors evenly\n if evenTrunc and chiSpec == chi:\n # This piece of codes is only designed\n # with Z2 symmetry tensor in mind\n errmeg = \"The matrix should have two sectors (0,0) and (1,1).\"\n assert len(dims) == 2, errmeg\n if chiSpec % 2 == 0:\n dims[(0, 0)] = int(chiSpec / 2)\n dims[(1, 1)] = int(chiSpec / 
2)\n else:\n dims[(0, 0)] = int((chiSpec + 1) / 2)\n dims[(1, 1)] = int((chiSpec - 1) / 2)\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n eigdecomps = {k: v for k, v in eigdecomps.items() if dims[k] > 0}\n for k, v in eigdecomps.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n eigdecomps[k] = (v[0][:d], v[1][:, :d])\n else:\n del eigdecomps[k]\n\n # Initialize S and U.\n d = self.dirs[0]\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=S_dtype,\n invar=False,\n charge=0,\n )\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=U_dtype,\n charge=0,\n )\n\n # Set the blocks of U, S and V.\n for k, v in eigdecomps.items():\n S[(k[0],)] = v[0]\n k_U = (k[0], k[0])\n U[k_U] = v[1]\n\n return S, U, rel_err", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def eigen(X):\n\n symmetric = np.alltrue(np.isclose(X - X.T, np.zeros(n)))\n small = max(X.shape) <= 11\n\n if symmetric:\n return jacobi(X)\n elif small:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm3(X, maxiter=maxiter)\n else:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm2(X, maxiter=maxiter)", "def scipy_eigsolver(\n kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],\n n_eigenpairs: int,\n is_symmetric: bool,\n is_stochastic: bool,\n):\n\n n_samples, n_features = kernel_matrix.shape\n\n # check only for n_eigenpairs == n_features and n_eigenpairs < n_features\n # wrong parametrized n_eigenpairs are catched in scipy functions\n if n_eigenpairs == n_features:\n if is_symmetric:\n scipy_eigvec_solver = scipy.linalg.eigh\n else:\n scipy_eigvec_solver = scipy.linalg.eig\n\n solver_kwargs: Dict[str, object] = {\n \"check_finite\": False\n } # should be already checked\n\n else: # n_eigenpairs < matrix.shape[1]\n if is_symmetric:\n scipy_eigvec_solver = scipy.sparse.linalg.eigsh\n else:\n scipy_eigvec_solver = scipy.sparse.linalg.eigs\n\n solver_kwargs = {\n \"k\": n_eigenpairs,\n \"which\": \"LM\",\n \"v0\": np.ones(n_samples),\n \"tol\": 1e-14,\n }\n\n # The selection of sigma is a result of a microbenchmark\n if is_symmetric and is_stochastic:\n # NOTE: it turned out that for self.kernel_.is_symmetric=False (-> eigs),\n # setting sigma=1 resulted into a slower computation.\n NUMERICAL_EXACT_BREAKER = 0.1\n solver_kwargs[\"sigma\"] = 1.0 + NUMERICAL_EXACT_BREAKER\n solver_kwargs[\"mode\"] = \"normal\"\n else:\n solver_kwargs[\"sigma\"] = None\n\n # the scipy solvers only work on floating points\n if scipy.sparse.issparse(\n kernel_matrix\n ) and kernel_matrix.data.dtype.kind not in [\"fdFD\"]:\n kernel_matrix = kernel_matrix.asfptype()\n elif isinstance(kernel_matrix, np.ndarray) and kernel_matrix.dtype != \"f\":\n kernel_matrix = kernel_matrix.astype(float)\n\n eigvals, eigvects = scipy_eigvec_solver(kernel_matrix, **solver_kwargs)\n\n return eigvals, eigvects", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = 
np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def syevd(self,uplo_,n_,a_,w_):\n _a_minlength = ((n_) * (n_))\n if ((n_) * (n_)) > 0 and a_ is not None and len(a_) != ((n_) * (n_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((n_) * (n_))))\n if isinstance(a_,numpy.ndarray) and not a_.flags.writeable:\n raise ValueError(\"Argument a must be writable\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n _w_minlength = (n_)\n if (n_) > 0 and w_ is not None and len(w_) != (n_):\n raise ValueError(\"Array argument w is not long enough: Is %d, expected %d\" % (len(w_),(n_)))\n if isinstance(w_,numpy.ndarray) and not w_.flags.writeable:\n raise ValueError(\"Argument w must be writable\")\n if w_ is None:\n raise ValueError(\"Argument w may not be None\")\n if isinstance(w_, numpy.ndarray) and w_.dtype is numpy.dtype(numpy.float64) and w_.flags.contiguous:\n _w_copyarray = False\n _w_tmp = ctypes.cast(w_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif w_ is not None:\n _w_copyarray = True\n _w_np_tmp = numpy.zeros(len(w_),numpy.dtype(numpy.float64))\n _w_np_tmp[:] = w_\n assert _w_np_tmp.flags.contiguous\n _w_tmp = ctypes.cast(_w_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _w_copyarray = False\n _w_tmp = None\n \n res = __library__.MSK_XX_syevd(self.__nativep,uplo_,n_,_a_tmp,_w_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _a_copyarray:\n a_[:] = _a_np_tmp\n if _w_copyarray:\n w_[:] = _w_np_tmp", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def compute_eigvals(theta, num_wires): # pylint: disable=arguments-differ\n eigs = 
qml.math.convert_like(pauli_eigs(num_wires), theta)\n\n if qml.math.get_interface(theta) == \"tensorflow\":\n theta = qml.math.cast_like(theta, 1j)\n eigs = qml.math.cast_like(eigs, 1j)\n\n if qml.math.ndim(theta) == 0:\n return qml.math.exp(-0.5j * theta * eigs)\n\n return qml.math.exp(qml.math.outer(-0.5j * theta, eigs))", "def eigs(self,num_eigvals,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num)\n eigvals, eigvecs = eigsh(ham,k=num_eigvals*num_sites,which='SM')\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(num_eigvals):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def test_eigen_basis_non_orthogonal():\n\n basis = [HarmonicOscillator(n=i, center=0.25) for i in range(2)] + [\n HarmonicOscillator(n=i, center=-0.25) for i in range(2)\n ]\n\n S = pairwise_array_from_func(basis, Overlap())\n H = pairwise_array_from_func(basis, Hamiltonian(Harmonic(center=0.0)))\n eigb = EigenBasis.from_basis(basis, H, S)\n\n # check the first 3 energy levels, we won't have converged\n # the higher ones wrt basis set size\n expected_energies = [(n + 0.5) for n in range(3)]\n\n diffs = [e1 - e2 for e1, e2 in zip(sorted(eigb.energies), expected_energies)]\n\n # a little lenient due to convergence of basis to keep test fast\n assert all(math.isclose(d, 0.0, abs_tol=1e-3) for d in diffs)", "def eig(C):\r\n\r\n # class eig(object):\r\n # def __call__(self, C):\r\n\r\n # Householder transformation of a symmetric matrix V into tridiagonal form.\r\n # -> n : dimension\r\n # -> V : symmetric nxn-matrix\r\n # <- V : orthogonal transformation matrix:\r\n # tridiag matrix == V * V_in * V^t\r\n # <- d : diagonal\r\n # <- e[0..n-1] : off diagonal (elements 1..n-1)\r\n\r\n # Symmetric tridiagonal QL algorithm, iterative\r\n # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations\r\n # -> n : Dimension.\r\n # -> d : Diagonale of tridiagonal matrix.\r\n # -> e[1..n-1] : off-diagonal, output from Householder\r\n # -> V : matrix output von Householder\r\n # <- d : eigenvalues\r\n # <- e : garbage?\r\n # <- V : basis of eigenvectors, according to d\r\n\r\n\r\n # tred2(N, B, diagD, offdiag); B=C on input\r\n # tql2(N, diagD, offdiag, B);\r\n\r\n # private void tred2 (int n, double V[][], double d[], double e[]) {\r\n def tred2 (n, V, d, e):\r\n # This is derived from the Algol procedures tred2 by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. 
Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # factor 1.5 in 30-D\r\n\r\n for j in range(n):\r\n d[j] = V[n-1][j] # d is output argument\r\n\r\n # Householder reduction to tridiagonal form.\r\n\r\n for i in range(n-1,0,-1):\r\n # Scale to avoid under/overflow.\r\n h = 0.0\r\n if not num_opt:\r\n scale = 0.0\r\n for k in range(i):\r\n scale = scale + abs(d[k])\r\n else:\r\n scale = sum(abs(d[0:i]))\r\n\r\n if scale == 0.0:\r\n e[i] = d[i-1]\r\n for j in range(i):\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n V[j][i] = 0.0\r\n else:\r\n\r\n # Generate Householder vector.\r\n if not num_opt:\r\n for k in range(i):\r\n d[k] /= scale\r\n h += d[k] * d[k]\r\n else:\r\n d[:i] /= scale\r\n h = np.dot(d[:i],d[:i])\r\n\r\n f = d[i-1]\r\n g = h**0.5\r\n\r\n if f > 0:\r\n g = -g\r\n\r\n e[i] = scale * g\r\n h = h - f * g\r\n d[i-1] = f - g\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] = 0.0\r\n else:\r\n e[:i] = 0.0\r\n\r\n # Apply similarity transformation to remaining columns.\r\n\r\n for j in range(i):\r\n f = d[j]\r\n V[j][i] = f\r\n g = e[j] + V[j][j] * f\r\n if not num_opt:\r\n for k in range(j+1, i):\r\n g += V[k][j] * d[k]\r\n e[k] += V[k][j] * f\r\n e[j] = g\r\n else:\r\n e[j+1:i] += V.T[j][j+1:i] * f\r\n e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])\r\n\r\n f = 0.0\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] /= h\r\n f += e[j] * d[j]\r\n else:\r\n e[:i] /= h\r\n f += np.dot(e[:i],d[:i])\r\n\r\n hh = f / (h + h)\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] -= hh * d[j]\r\n else:\r\n e[:i] -= hh * d[:i]\r\n\r\n for j in range(i):\r\n f = d[j]\r\n g = e[j]\r\n if not num_opt:\r\n for k in range(j, i):\r\n V[k][j] -= (f * e[k] + g * d[k])\r\n else:\r\n V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])\r\n\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n\r\n d[i] = h\r\n # end for i--\r\n\r\n # Accumulate transformations.\r\n\r\n for i in range(n-1):\r\n V[n-1][i] = V[i][i]\r\n V[i][i] = 1.0\r\n h = d[i+1]\r\n if h != 0.0:\r\n if not num_opt:\r\n for k in range(i+1):\r\n d[k] = V[k][i+1] / h\r\n else:\r\n d[:i+1] = V.T[i+1][:i+1] / h\r\n\r\n for j in range(i+1):\r\n if not num_opt:\r\n g = 0.0\r\n for k in range(i+1):\r\n g += V[k][i+1] * V[k][j]\r\n for k in range(i+1):\r\n V[k][j] -= g * d[k]\r\n else:\r\n g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])\r\n V.T[j][:i+1] -= g * d[:i+1]\r\n\r\n if not num_opt:\r\n for k in range(i+1):\r\n V[k][i+1] = 0.0\r\n else:\r\n V.T[i+1][:i+1] = 0.0\r\n\r\n\r\n if not num_opt:\r\n for j in range(n):\r\n d[j] = V[n-1][j]\r\n V[n-1][j] = 0.0\r\n else:\r\n d[:n] = V[n-1][:n]\r\n V[n-1][:n] = 0.0\r\n\r\n V[n-1][n-1] = 1.0\r\n e[0] = 0.0\r\n\r\n\r\n # Symmetric tridiagonal QL algorithm, taken from JAMA package.\r\n # private void tql2 (int n, double d[], double e[], double V[][]) {\r\n # needs roughly 3N^3 operations\r\n def tql2 (n, d, e, V):\r\n\r\n # This is derived from the Algol procedures tql2, by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. 
Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # using vectors from numpy makes it faster\r\n\r\n if not num_opt:\r\n for i in range(1,n): # (int i = 1; i < n; i++):\r\n e[i-1] = e[i]\r\n else:\r\n e[0:n-1] = e[1:n]\r\n e[n-1] = 0.0\r\n\r\n f = 0.0\r\n tst1 = 0.0\r\n eps = 2.0**-52.0\r\n for l in range(n): # (int l = 0; l < n; l++) {\r\n\r\n # Find small subdiagonal element\r\n\r\n tst1 = max(tst1, abs(d[l]) + abs(e[l]))\r\n m = l\r\n while m < n:\r\n if abs(e[m]) <= eps*tst1:\r\n break\r\n m += 1\r\n\r\n # If m == l, d[l] is an eigenvalue,\r\n # otherwise, iterate.\r\n\r\n if m > l:\r\n iiter = 0\r\n while 1: # do {\r\n iiter += 1 # (Could check iteration count here.)\r\n\r\n # Compute implicit shift\r\n\r\n g = d[l]\r\n p = (d[l+1] - g) / (2.0 * e[l])\r\n r = (p**2 + 1)**0.5 # hypot(p,1.0)\r\n if p < 0:\r\n r = -r\r\n\r\n d[l] = e[l] / (p + r)\r\n d[l+1] = e[l] * (p + r)\r\n dl1 = d[l+1]\r\n h = g - d[l]\r\n if not num_opt:\r\n for i in range(l+2, n):\r\n d[i] -= h\r\n else:\r\n d[l+2:n] -= h\r\n\r\n f = f + h\r\n\r\n # Implicit QL transformation.\r\n\r\n p = d[m]\r\n c = 1.0\r\n c2 = c\r\n c3 = c\r\n el1 = e[l+1]\r\n s = 0.0\r\n s2 = 0.0\r\n\r\n # hh = V.T[0].copy() # only with num_opt\r\n for i in range(m-1, l-1, -1): # (int i = m-1; i >= l; i--) {\r\n c3 = c2\r\n c2 = c\r\n s2 = s\r\n g = c * e[i]\r\n h = c * p\r\n r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])\r\n e[i+1] = s * r\r\n s = e[i] / r\r\n c = p / r\r\n p = c * d[i] - s * g\r\n d[i+1] = h + s * (c * g + s * d[i])\r\n\r\n # Accumulate transformation.\r\n\r\n if not num_opt: # overall factor 3 in 30-D\r\n for k in range(n): # (int k = 0; k < n; k++) {\r\n h = V[k][i+1]\r\n V[k][i+1] = s * V[k][i] + c * h\r\n V[k][i] = c * V[k][i] - s * h\r\n else: # about 20% faster in 10-D\r\n hh = V.T[i+1].copy()\r\n # hh[:] = V.T[i+1][:]\r\n V.T[i+1] = s * V.T[i] + c * hh\r\n V.T[i] = c * V.T[i] - s * hh\r\n # V.T[i] *= c\r\n # V.T[i] -= s * hh\r\n\r\n p = -s * s2 * c3 * el1 * e[l] / dl1\r\n e[l] = s * p\r\n d[l] = c * p\r\n\r\n # Check for convergence.\r\n if abs(e[l]) <= eps*tst1:\r\n break\r\n # } while (Math.abs(e[l]) > eps*tst1);\r\n\r\n d[l] = d[l] + f\r\n e[l] = 0.0\r\n\r\n\r\n # Sort eigenvalues and corresponding vectors.\r\n if 11 < 3:\r\n for i in range(n-1): # (int i = 0; i < n-1; i++) {\r\n k = i\r\n p = d[i]\r\n for j in range(i+1, n): # (int j = i+1; j < n; j++) {\r\n if d[j] < p: # NH find smallest k>i\r\n k = j\r\n p = d[j]\r\n\r\n if k != i:\r\n d[k] = d[i] # swap k and i\r\n d[i] = p\r\n for j in range(n): # (int j = 0; j < n; j++) {\r\n p = V[j][i]\r\n V[j][i] = V[j][k]\r\n V[j][k] = p\r\n # tql2\r\n\r\n N = len(C[0])\r\n if 11 < 3:\r\n V = np.array([x[:] for x in C]) # copy each \"row\"\r\n N = V[0].size\r\n d = np.zeros(N)\r\n e = np.zeros(N)\r\n else:\r\n V = [[x[i] for i in xrange(N)] for x in C] # copy each \"row\"\r\n d = N * [0.]\r\n e = N * [0.]\r\n\r\n tred2(N, V, d, e)\r\n tql2(N, d, e, V)\r\n return (array(d), array(V))", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def compute_eigendecomp(self, atol=1e-13, rtol=None):\n self.eigvals, self.eigvecs = parallel.call_and_bcast(\n util.eigh, self.correlation_array, atol=atol, rtol=rtol,\n is_positive_definite=True)", "def diagonalize_asymm(H):\n E,C = np.linalg.eig(H)\n #if np.allclose(E.imag, 0*E.imag):\n # E = np.real(E)\n #else:\n # print \"WARNING: Eigenvalues are complex, will be returned as such.\"\n\n idx = E.real.argsort()\n E = E[idx]\n C = C[:,idx]\n\n return 
E,C", "def symeigLanczos(A, k, extreme=\"both\", *, sparse=False, dim=None):\n Qk, T = Lanczos(A, k, sparse=sparse, dim=dim)\n eigvalsQ, eigvectorsQ = torch.symeig(T, eigenvectors=True)\n eigvectorsQ = torch.matmul(Qk, eigvectorsQ)\n if extreme == \"both\":\n return eigvalsQ[0], eigvectorsQ[:, 0], eigvalsQ[-1], eigvectorsQ[:, -1]\n elif extreme == \"min\":\n return eigvalsQ[0], eigvectorsQ[:, 0]\n elif extreme == \"max\":\n return eigvalsQ[-1], eigvectorsQ[:, -1]", "def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")", "def calculate_eigenvalues(self):\n self.__eigenvalues = []\n dictionary = np.linalg.eig(np.array(self.__A))\n indicator = True\n sum1 = 0\n for i in range(self.__A.shape[0]):\n if all(self.__A[i, j] == 0 for j in range(self.__A.shape[1])):\n indicator = all(self.__B[i,j] for j in range(self.__B.shape[1]))\n if (indicator):\n sum1 += 1\n \n for val in dictionary[0]:\n if (val != 0):\n self.__eigenvalues.append(complex(val))\n elif (indicator) and (sum1 > 0):\n sum1 -= 1\n self.__eigenvalues.append(complex(val))", "def Ham_eigvals(self,kx,ky):\n tHam=self.Ham_gen(kx,ky)\n eigval=np.linalg.eigvals(tHam)\n sidc=eigval.argsort()\n eigval=eigval[sidc]\n return eigval.real", "def eigensystemtomatrix(ew1,ew2,sint):\n if abs(ew1-ew2)/float(ew1)<0.0001:\n raise Exception(\"Can't deal with equal eigenvalues\")\n cost = np.sqrt(1-sint*sint)\n x = abs((ew1-ew2))*cost*sint\n if (ew1>ew2 and sint<1/np.sqrt(2)) or (ew1<ew2 and sint>1/np.sqrt(2)):\n a = 0.5*(ew1+ew2)+np.sqrt(0.25*(ew1+ew2)**2-ew1*ew2-x*x)\n else:\n a = 0.5*(ew1+ew2)-np.sqrt(0.25*(ew1+ew2)**2-ew1*ew2-x*x)\n b = ew1+ew2-a\n return a,b,x", "def get_su_eig(self, delcc):\n pc = SimpleNamespace()\n h = self.h\n if self.rbsize:\n self._inv_mrb()\n if h:\n pc.G = h\n pc.A = h * h / 3\n pc.Ap = h / 2\n if self.unc:\n pv = self._el\n else:\n pv = np.ix_(self._el, self._el)\n if self.m is not None:\n self.m = self.m[pv]\n self.k = self.k[pv]\n self.b = self.b[pv]\n self.kdof = self.nonrf[self._el]\n self.ksize = self.kdof.size\n\n self._el = np.arange(self.ksize) # testing ...\n self._rb = np.arange(0)\n\n if self.elsize:\n self._inv_m()\n A = self._build_A()\n eig_info = eigss(A, delcc)\n pc.wn = eig_info.wn\n pc.zeta = eig_info.zeta\n pc.eig_success = eig_info.eig_success\n if h:\n self._get_complex_su_coefs(pc, eig_info.lam, h)\n self._add_partition_copies(pc, eig_info.lam, eig_info.ur, eig_info.ur_inv)\n return pc", "def _compute_dualEigenmatrix(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"Q\"):\n return\n if self._has(\"q\"):\n self._.Q = self._compute_eigenmatrix(self._.q, expand=expand,\n factor=factor,\n simplify=simplify)\n else:\n if not self._has(\"P\"):\n self.eigenmatrix(expand=expand, factor=factor,\n simplify=simplify)\n self._.Q = self._.n * self._.P.inverse()\n self._check_eigenmatrices()", "def eigen_vector_i_all(self):\n return self._eig_vec", "def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)", "def _compute_eigenmatrix(self, k, tr, expand=False, factor=False,\n simplify=False):\n if not self._has(\"omega\"):\n self.cosineSequences(expand=expand, factor=factor,\n simplify=simplify)\n return Matrix(SR, [[self._.omega[tr(i, j)] * k[j]\n for j in range(self._.d + 1)]\n for i in range(self._.d + 1)])" ]
[ "0.81746787", "0.65293", "0.6337791", "0.6227501", "0.61773306", "0.61583877", "0.6139027", "0.6113723", "0.6071404", "0.5926805", "0.5913415", "0.5867152", "0.5759248", "0.574515", "0.5692723", "0.5674269", "0.5673998", "0.56041884", "0.5596572", "0.55924916", "0.5584967", "0.5577579", "0.55599546", "0.55443984", "0.55229545", "0.55009127", "0.5495508", "0.54832166", "0.5481329", "0.54756147" ]
0.7566581
1
Stops all threads and delete all handles used by the license system. licensecleanup()
def licensecleanup(): res = __library__.MSK_XX_licensecleanup() if res != 0: raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def licensecleanup(): # 3\n res = _msk.Env.licensecleanup()\n if res != 0:\n raise Error(rescode(res),\"\")", "def cleanup(self):\n logger.debug('Beginning cleanup ...')\n self.stop()\n\n #Clear subscriptions\n for sub, dev in self.subs.items():\n dev.clear_sub(sub)\n\n #Clear databases\n self.subs.clear()\n self.cmds.clear()\n\n #Remove pcaspy internals\n self._thread = None\n self.driver = None\n self.server = None\n logger.info('Cleanup finished')", "def cleanup():\n for th in THREAD_REGISTER.values():\n th.exit()\n th.join(timeout=3)", "def cleanup(self):\r\n # XXX should be fixed properly!!!\r\n try:\r\n self.unlock()\r\n except:\r\n pass", "def cleanupAtExit():\n \n global client\n \n client.stop()", "def cleanup():\n dist.destroy_process_group()", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def cleanup():", "def clean(self):\n # Wait for all threads to finish\n [x.join() for x in self.__threads]\n\n if self.recording:\n self.record_file.close()\n\n if self.simulating:\n self.simulate_file.close()\n\n self.s.shutdown(socket.SHUT_RDWR)\n self.s.close()\n print(\"Cleaned\")", "def cleanup(self):\n if self.__cleaned_up:\n return\n\n if (\n self._log_to_file is not None\n and self._log_to_file\n and self._log_file is not None\n ):\n close_file_safe(self._log_file)\n\n if self.acquiring:\n self.stop_acquisition()\n\n for _, acq in self._acquisition_controllers.items():\n acq.cleanup()\n\n self._acquisition_controllers = []\n if self._comms_stream is not None:\n self._comms_stream.stop()\n time.sleep(0.5)\n self._comms_stream = None\n\n self._logging_socket.close()\n self._control_socket.close()\n\n self.__cleaned_up = True\n self.active = False\n\n logging.info(\"Exiting RCC Conductor.\")", "def cleanup(self):\r\n self.stop()\r\n self.PWM.stop() # stop the PWM output\r", "def _cleanup():\n for itr_ref in ITERATORS_LIST:\n if context:\n device_type = context.get_context(\"device_target\")\n if device_type == \"GPU\":\n itr_ref.release()\n else:\n itr = itr_ref()\n if itr is not None:\n itr.release()\n else:\n itr = itr_ref()\n if itr is not None:\n itr.release()", "def __del__(self):\n self.shutdown()", "def cleanupResources():\n None", "def __del__(self):\r\n self.cleanup()", "def __del__(self):\n self._cleanup()", "def cleanup(self):\n process_handler.terminate_root_and_child_processes(self._proc.pid)\n self._read_thread.join()\n if self._data_dir:\n shutil.rmtree(self._data_dir, ignore_errors=True)", "def __del__(self):\n self.p.sleep()\n GPIO.cleanup()", "def cleanup_and_exit():\n logger.warn(\"Terminating the program\")\n try:\n for key in connections:\n try:\n connections[key].disconnect()\n except AttributeError:\n pass\n for s in sensors:\n try:\n sensors[s].cleanup()\n except AttributeError:\n pass\n except:\n pass\n sys.exit(0)", "def cleanup(self):\n self.GP.cleanup()", "def clean_up(self):\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def __del__(self):\n self.motorStop()\n GPIO.cleanup()", "def destroy(self):\n\n dcgm_agent.dcgmShutdown()\n 
self._thread_pool.terminate()\n self._thread_pool.close()", "def cleanup(self):\n log(\"[%s] Cleaning up\" % (self.__class__.__name__))\n self.manager.close()\n self.capture.release()\n self.barrier.abort()", "def cleanup(self):\n GPIO.cleanup(self.chanlist)", "def clean_up(self):\n dist.destroy_process_group()", "def __del__(self):\n GPIO.cleanup()\n logging.info('GPIO Cleanup Complete')", "def dist_cleanup():\n dist.destroy_process_group()", "def _cleanup(self):\n \tself._gqcnn.close_session()" ]
[ "0.69909203", "0.6920652", "0.68741477", "0.67954016", "0.67419183", "0.66345876", "0.6611523", "0.65672064", "0.65559095", "0.65391237", "0.64915305", "0.64907163", "0.6485269", "0.64084905", "0.6398197", "0.6396374", "0.6393423", "0.6385401", "0.6373598", "0.63693166", "0.63563097", "0.6348484", "0.6338197", "0.63353664", "0.6314725", "0.6301608", "0.62943685", "0.62884134", "0.6286351", "0.6279324" ]
0.69317514
1
Construct a new Task object. Task(env=None,maxnumcon=0,maxnumvar=0,nativep=None,other=None)
def __init__(self,env=None,maxnumcon=0,maxnumvar=0,nativep=None,other=None): self.__library = __library__ self.__nativep = None self.__schandle = None self._lock = threading.RLock() if isinstance(env,Task): other = env env = None try: if nativep is not None: self.__nativep = nativep res = 0 elif other is not None: self.__nativep = ctypes.c_void_p() res = self.__library.MSK_XX_clonetask(other.__nativep, ctypes.byref(self.__nativep)) else: if not isinstance(env,Env): raise TypeError('Expected an Env for argument') self.__nativep = ctypes.c_void_p() res = self.__library.MSK_XX_maketask(env._getNativeP(),maxnumcon,maxnumvar,ctypes.byref(self.__nativep)) if res != 0: raise Error(rescode(res),"Error %d" % res) # user progress function: self.__progress_func = None self.__infocallback_func = None # callback proxy function definition: def progress_proxy(nativep, handle, caller, dinfptr, iinfptr, liinfptr): r = 0 try: if self.__progress_func or self.__infocallback_func: caller = callbackcode(caller) f = self.__infocallback_func if f is not None: r = f(caller, ctypes.cast(dinfptr, ctypes.POINTER(ctypes.c_double))[:len(dinfitem._values)] if dinfptr is not None else None, ctypes.cast(iinfptr, ctypes.POINTER(ctypes.c_int))[:len(iinfitem._values)] if iinfptr is not None else None, ctypes.cast(liinfptr,ctypes.POINTER(ctypes.c_longlong))[:len(liinfitem._values)] if liinfptr is not None else None, ) f = self.__progress_func if f is not None: r = f(caller) if not isinstance(r,int): r = 0 except: import traceback traceback.print_exc() return -1 return r # callback proxy C wrapper: self.__progress_cb = __progress_cb_type__(progress_proxy) # user stream functions: self.__stream_func = 4 * [ None ] # strema proxy functions and wrappers: self.__stream_cb = 4 * [ None ] for whichstream in xrange(4): # Note: Apparently closures doesn't work when the function is wrapped in a C function... So we use default parameter value instead. def stream_proxy(handle, msg, whichstream=whichstream): func = self.__stream_func[whichstream] try: if func is not None: func(msg) except: pass self.__stream_cb[whichstream] = __stream_cb_type__(stream_proxy) assert self.__nativep except: #import traceback #traceback.print_exc() if hasattr(self,'_Task__nativep') and self.__nativep is not None: #print "DELETE TASK 2",id(self) self.__library.MSK_XX_deletetask(ctypes.byref(self.__nativep)) self.__nativep = None raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, env: MTEnv, n_tasks: int):\n super().__init__(env=env)\n self.n_tasks = n_tasks\n self.tasks: List[TaskStateType]\n self._are_tasks_set = False", "def __init__(self,target, name = \"\", prio = 10, period = 0, time2run = 0):\n Task.taskid += 1\n self.tid = Task.taskid # Task ID\n self.target = target # create coroutine from given generator\n self.params = None # Value to send/receive\n self.prio = prio\n if name == \"\":\n self.name = \"task_%d\" % self.tid\n else:\n self.name = name\n self.period = period # zero: run now\n # negative: run once\n # positive: run at interval\n self.time2run = time.ticks_ms();\n if time2run>0:\n self.time2run += time2run\n else: \n self.time2run += period\n log.debug(\"Created task %s %d \", self.name,self.tid)\n self.target.send(None)", "def __init__(self, task, time_limit=float('inf'), random_state=None,\n n_sub_steps=None,\n raise_exception_on_physics_error=True,\n strip_singleton_obs_buffer_dim=False,\n max_reset_attempts=1):\n super(Environment, self).__init__(\n task=task,\n time_limit=time_limit,\n random_state=random_state,\n n_sub_steps=n_sub_steps,\n raise_exception_on_physics_error=raise_exception_on_physics_error,\n strip_singleton_obs_buffer_dim=strip_singleton_obs_buffer_dim)\n self._max_reset_attempts = max_reset_attempts\n self._reset_next_step = True", "def __init__(self, task, queue, semaphore=None, task_args=None,\n task_kwargs=None):\n multiprocessing.Process.__init__(self)\n self._task = task\n self._queue = queue\n self._semaphore = semaphore\n self._started = multiprocessing.Event()\n self._killing = multiprocessing.Event()\n self._output = None\n self._parent_pid = None\n self._task_args = task_args if task_args else ()\n self._task_kwargs = task_kwargs if task_kwargs else {}", "def __init__(self, env, noop_max=30):\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0", "def __init__(self, config, schema=None, **kwds):\n pipeBase.Task.__init__(self, config=config, **kwds)", "def __init__(self, benchmarks=None, rho=None, lamb=None, bias=None, **kwargs):\n\t\tTask.__init__(self, **kwargs)", "def __init__(self, task_type, task):\n self.task = task\n self.task_type = task_type", "def create_task():", "def __init__(self, task_params):\n self.seq_width = task_params[\"seq_width\"]\n self.min_seq_len = task_params[\"min_seq_len\"]\n self.max_seq_len = task_params[\"max_seq_len\"]\n self.min_repeat = task_params[\"min_repeat\"]\n self.max_repeat = task_params[\"max_repeat\"]\n self.in_dim = task_params['seq_width'] + 2\n self.out_dim = task_params['seq_width'] + 1", "def make_task(task_name, override_kwargs=None, max_code_length=100,\n require_correct_syntax=False,\n do_code_simplification=False,\n correct_bonus=2.0, code_length_bonus=1.0):\n logging.info('Making paper-config task.')\n n = 16 # Number of test cases.\n task_mapping = {\n 'print-hello': (\n PrintTask, dict(base=27, fixed_string=[8, 5, 12, 12, 15])),\n 'print': (PrintIntTask, dict(base=256, fixed_string=[1, 2, 3, 4, 5])),\n 'echo': (EchoTask, dict(base=27, min_length=1, max_length=6)),\n 'remove-char': (\n RemoveCharTask, dict(base=256, n=n, min_len=1, max_len=6)),\n 'reverse': (\n ReverseTask, dict(base=256, n=n, min_len=1, max_len=6)),\n 'reverse-tune': (\n ReverseTaskV2, dict(base=256, reward_type='static-bylen')),\n 'remove-char-tune': (RemoveCharTaskV2, dict(base=27)),\n 'prefix': (CommonPrefixTask, dict(base=27)),\n 'find': (FindSubStrTask, dict(base=27)),\n 'sort3': (SortFixedTaskV2, 
dict(base=27, n=150, length=3)),\n 'count-char': (CountCharTaskV2, dict(n=n, max_len=6)),\n 'bool-logic': (BooleanLogicTask, dict()),\n 'add': (AddTask, dict(n=9)),\n 'echo-twice': (EchoTwiceTask, dict(n=n)),\n 'echo-thrice': (EchoThriceTask, dict(n=n)),\n 'copy-reverse': (CopyReverseTask, dict(n=n)),\n 'zero-cascade': (EchoZeroCascadeTask, dict(n=n)),\n 'cascade': (EchoCascadeTask, dict(n=n)),\n 'shift-left': (ShiftLeftTask, dict(n=n)),\n 'shift-right': (ShiftRightTask, dict(n=n)),\n 'riffle': (RiffleTask, dict(n=n)),\n 'unriffle': (UnriffleTask, dict(n=n)),\n 'middle-char': (MiddleCharTask, dict(n=n)),\n 'remove-last': (RemoveLastTask, dict(n=n)),\n 'remove-last-two': (RemoveLastTwoTask, dict(n=n)),\n 'echo-alternating': (EchoAlternatingTask, dict(n=n)),\n 'echo-half': (EchoHalfTask, dict(n=n)),\n 'length': (LengthTask, dict(n=n)),\n 'echo-second-seq': (EchoSecondSequenceTask, dict(n=n)),\n 'echo-nth-seq': (EchoNthSequenceTask, dict(n=n)),\n 'substring': (SubstringTask, dict(n=n)),\n 'divide-2': (Divide2Task, dict(n=n)),\n 'dedup': (DedupTask, dict(n=n)),\n 'remove-target-char': (RemoveTargetCharTask, dict(n=n)),\n 'list-index': (ListIndexTask, dict(n=n)),\n 'fib': (FibonacciTask, dict()),\n 'count-down': (BottlesOfBeerTask, dict()),\n 'split': (SplitTask, dict()),\n 'trim-left': (TrimLeftTask, dict()),\n 'circle-route': (\n JudgeRouteCircleTask, dict(n=100, max_len=32)),\n 'multiply': (MultiplyTask, dict(n=100)),\n 'divmod': (DivModTask, dict(n=100)),\n }\n\n if task_name not in task_mapping:\n # Test tasks.\n if task_name == 'test-hill-climb':\n return test_tasks.BasicTaskManager(test_tasks.HillClimbingTask())\n raise ValueError('Unknown task type \"%s\"' % task_name)\n task_cls, kwargs = task_mapping[task_name]\n\n if override_kwargs:\n if not isinstance(override_kwargs, dict):\n raise ValueError(\n 'override_kwargs must be a dict, got: %s', override_kwargs)\n kwargs.update(override_kwargs)\n\n task = task_cls(**kwargs)\n\n reward_fn = r.absolute_distance_reward\n # reward_fn = r.absolute_mod_distance_reward\n # reward_fn = r.absolute_log_distance_reward\n logging.info('Using reward function: %s', reward_fn.__name__)\n\n # We want reward with and without code simplification to be scaled the same\n # way. 
Without code simplification, give the maximum code length bonus\n # every time.\n min_code_length = 0.0 if do_code_simplification else max_code_length\n\n return MultiIOTaskManager(\n task=task, correct_bonus=correct_bonus,\n code_length_bonus=code_length_bonus,\n max_code_length=max_code_length, min_code_length=min_code_length,\n reward_fn=reward_fn, require_correct_syntax=require_correct_syntax)", "def __init__(self):\n Task.__init__(self)", "def __init__(self, gator, max_tasks=0, to_consume=ALL, nap_time=0.1):\n self.gator = gator\n self.max_tasks = int(max_tasks)\n self.to_consume = to_consume\n self.nap_time = nap_time\n self.tasks_complete = 0", "def __call__(self, *args, **kw):\n return Task(self, **self.__options)(*args, **kw)", "def __init__(self, id: str, description: str, max_number_of_parallel_tasks: int,\n source_db_alias: str, target_db_alias: str, schema_name: str,\n commands_before: [Command] = None, commands_after: [Command] = None) -> None:\n\n ParallelTask.__init__(self, id=id, description=description,\n max_number_of_parallel_tasks=max_number_of_parallel_tasks,\n commands_before=commands_before, commands_after=commands_after)\n\n self.source_db_alias = source_db_alias\n self.target_db_alias = target_db_alias\n self.schema_name = schema_name", "def __init__(self, isParent):\n UTIL.TASK.ProcessingTask.__init__(self, isParent=isParent)", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n self.__pythonExecutable\n )", "def parse_vec_task(args: argparse.Namespace, cfg: dict) -> VecTaskPython:\n # create native task and pass custom config\n if args.task_type == \"Python\":\n # check device on which to run agent and environment\n if args.device == \"CPU\":\n print_info(\"Running using python CPU...\")\n # check if agent is on different device\n sim_device = 'cpu'\n ppo_device = 'cuda:0' if args.ppo_device == \"GPU\" else 'cpu'\n else:\n print_info(\"Running using python GPU...\")\n sim_device = 'cuda:0'\n ppo_device = 'cuda:0'\n # create the IsaacEnvBase defined using leibnizgym\n try:\n task = eval(args.task)(config=cfg, device=sim_device,\n visualize=not args.headless,\n verbose=args.verbose)\n except NameError:\n raise InvalidTaskNameError(args.task)\n # wrap environment around vec-python wrapper\n env = VecTaskPython(task, rl_device=ppo_device, clip_obs=5, clip_actions=1)\n else:\n raise ValueError(f\"No task of type `{args.task_type}` in leibnizgym.\")\n\n return env", "def task_init(self, param1):\n raise NotImplementedError", "def __init__(self, new_task_name=''):\r\n self._handle = lib_importer.task_handle(0)\r\n\r\n cfunc = lib_importer.windll.DAQmxCreateTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n ctypes_byte_str,\r\n ctypes.POINTER(lib_importer.task_handle)]\r\n\r\n error_code = cfunc(\r\n new_task_name, ctypes.byref(self._handle))\r\n check_for_error(error_code)\r\n\r\n self._initialize(self._handle)", "def __init__(self, num_workers, eval_function, timeout=None, maxtasksperchild=None):\n self.eval_function = eval_function\n self.timeout = timeout\n self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild)", "def __init__(self):\n\t\t\n\t\tsuper(SystemMemUtilTask, self).__init__(sensorName = ConfigConst.MEM_UTIL_NAME)", "def __init__(self, parser: Parser, processor: Processor, task: Node) -> None:\n self.parser = parser\n self.task = task\n self.processor = processor\n self.default_time = 
-1\n self.cost = -1\n self.calculate()\n self.time = self.default_time\n self.enabled = False\n self.delayed = False", "def __init__(self, task, time_limit=float('inf'), random_state=None,\n n_sub_steps=None,\n raise_exception_on_physics_error=True,\n strip_singleton_obs_buffer_dim=False):\n self._task = task\n if not isinstance(random_state, np.random.RandomState):\n self._random_state = np.random.RandomState(random_state)\n else:\n self._random_state = random_state\n self._hooks = _EnvironmentHooks(self._task)\n self._time_limit = time_limit\n self._raise_exception_on_physics_error = raise_exception_on_physics_error\n self._strip_singleton_obs_buffer_dim = strip_singleton_obs_buffer_dim\n\n if n_sub_steps is not None:\n warnings.simplefilter('once', DeprecationWarning)\n warnings.warn('The `n_sub_steps` argument is deprecated. Please override '\n 'the `control_timestep` property of the task instead.',\n DeprecationWarning)\n self._overridden_n_sub_steps = n_sub_steps\n\n self._recompile_physics_and_update_observables()", "def __init__(\n self,\n train_X: Tensor,\n train_Y: Tensor,\n train_Yvar: Optional[Tensor],\n task_feature: int,\n output_tasks: Optional[List[int]] = None,\n rank: Optional[int] = None,\n outcome_transform: Optional[OutcomeTransform] = None,\n input_transform: Optional[InputTransform] = None,\n pyro_model: Optional[PyroModel] = None,\n ) -> None:\n if not (\n train_X.ndim == train_Y.ndim == 2\n and len(train_X) == len(train_Y)\n and train_Y.shape[-1] == 1\n ):\n raise ValueError(\n \"Expected train_X to have shape n x d and train_Y to have shape n x 1\"\n )\n if train_Yvar is not None and train_Y.shape != train_Yvar.shape:\n raise ValueError(\n \"Expected train_Yvar to be None or have the same shape as train_Y\"\n )\n with torch.no_grad():\n transformed_X = self.transform_inputs(\n X=train_X, input_transform=input_transform\n )\n if outcome_transform is not None:\n train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)\n if train_Yvar is not None: # Clamp after transforming\n train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL)\n\n super().__init__(\n train_X=train_X,\n train_Y=train_Y,\n train_Yvar=train_Yvar,\n task_feature=task_feature,\n output_tasks=output_tasks,\n )\n self.to(train_X)\n\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None\n self.register_buffer(\"latent_features\", None)\n if pyro_model is None:\n pyro_model = MultitaskSaasPyroModel()\n pyro_model.set_inputs(\n train_X=transformed_X,\n train_Y=train_Y,\n train_Yvar=train_Yvar,\n task_feature=task_feature,\n task_rank=rank,\n )\n self.pyro_model = pyro_model\n if outcome_transform is not None:\n self.outcome_transform = outcome_transform\n if input_transform is not None:\n self.input_transform = input_transform", "def __init__(self):\n abstracttask.Task.__init__(self)\n self._ticket_id = ''# this is an implementation detail of jutda task tracker\n self.timespent = datetime.timedelta(0) # not editable permenently, but saves data from hours\n self.starttime = datetime.datetime.now() # ticket creation time in this implementation \n self.isappointment = False # always false for these\n self.followups = [] # not likely to be used, since other implementation doesn't have it.\n self._orig = None\n self.submitter_email = None", "def __init__(self, env, noop_max=30):\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'", 
"def __init__(self, env, noop_max=30):\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'", "def __init__(self, env, noop_max=30):\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'", "def __init__(self, function, max_eval_concurrency, assert_omp=True,\n base_model=None):\n self.base_model = base_model\n self.set_max_eval_concurrency(max_eval_concurrency)\n self.num_evaluations = 0\n self.assert_omp = assert_omp\n self.pool_function = function" ]
[ "0.6326176", "0.603982", "0.60264874", "0.6022742", "0.59993345", "0.59907025", "0.5958645", "0.5920431", "0.58973324", "0.58927375", "0.5887594", "0.5862502", "0.58127207", "0.5787428", "0.5779538", "0.57600564", "0.5736297", "0.57311445", "0.57209444", "0.5698059", "0.568281", "0.5674122", "0.5673557", "0.56715137", "0.5669925", "0.56473583", "0.56460273", "0.56460273", "0.56460273", "0.5609545" ]
0.7289506
0
Set the progress callback function. If func is None, progress callbacks are detached and disabled.
def set_Progress(self,func): if func is None: self.__progress_func = None #res = self.__library.MSK_XX_putcallbackfunc(self.__nativep,None,None) else: self.__progress_func = func res = self.__library.MSK_XX_putcallbackfunc(self.__nativep,self.__progress_cb,None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_Progress(self,func):\n self.__obj.set_Progress(func)", "def progress_callback(self, func):\n self.curl.setopt(pycurl.PROGRESSFUNCTION, func)", "def svn_client_ctx_t_progress_func_set(svn_client_ctx_t_self, svn_ra_progress_notify_func_t_progress_func): # real signature unknown; restored from __doc__\n pass", "def register_callback(self, func):\n self.callback = func", "def func(self, func):\n if func is None:\n raise ValueError(\"Invalid value for `func`, must not be `None`\") # noqa: E501\n\n self._func = func", "def Progress(func):\n def wrapper(*args, **kwargs):\n bar = ttk.Progressbar(master = root, length = 100, mode = 'indeterminate')\n bar.grid(column = 1, row = 11)\n bar.start()\n time.sleep(2)\n result = func(*args, **kwargs)\n try:\n time.sleep(2)\n bar.stop()\n bar.destroy()\n except:\n pass\n return result\n return wrapper", "def setTransitionCallback(self, func):\n self._transitionCallbackFunc = func", "def registerProgressCallback(self, callback):\n assert False, \"Deriving class must implement\"", "def set_func(self, func: FinalTargetType) -> None:\n if self.func is not None:\n raise Exception(\"Can't change func\")\n\n self.func = func\n\n # pylint: disable=import-outside-toplevel\n from .command import Command\n if iscoroutinefunction(func) or (isinstance(func, Command) and func.is_async):\n # pylint: disable=import-outside-toplevel\n from .units import ASYNC_UNIT\n self.add_target(ASYNC_UNIT)", "def _set_func(self, func):\n if callable(func):\n self._func = func\n else:\n raise TypeError(\"'func should be callable'\")", "def set_progress(self, progress: float):", "def set_callback(self,callback = None):\n self.callback = callback", "def set_InfoCallback(self,func):\n if func is None:\n self.__infocallback_func = None\n #res = self.__library.MSK_XX_putcallbackfunc(self.__nativep,None,None)\n else:\n self.__infocallback_func = func \n res = self.__library.MSK_XX_putcallbackfunc(self.__nativep,self.__progress_cb,None)", "def callback(self, function: Optional[Callable[[int], None]]) -> None:", "def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func", "def setProgress(self, prog):\n\t\tself.progress = prog", "def with_progress_bar(func, totalCalls, prefix = '', postfix='', isBytes=False):\n from multiprocessing import Value, Lock\n completed = Value('d', 0)\n lock = Lock()\n\n def progress(*args, **kwargs):\n with lock:\n completed.value +=1\n synapseclient.utils.printTransferProgress(completed.value, totalCalls, prefix, postfix, isBytes)\n return func(*args, **kwargs)\n return progress", "def set_error_callback(self, cb_func):\n self._error_callback = cb_func", "def svn_client_ctx_t_progress_baton_set(svn_client_ctx_t_self, void_progress_baton): # real signature unknown; restored from __doc__\n pass", "def set_pfunc(self, func):\n if (self.operator in _cst.list_FIELD_OPERATORS) or (len(self.func_arguments) == 0) :\n from .utils import function\n self.pfunc = function(func, space=self.space)\n else:\n raise(\"Not used anymore. 
Dead code\")", "def register_iden_progress_changed_callback(self, callback=None):\r\n return self._arm.register_iden_progress_changed_callback(callback=callback)", "def set_func(self, function):\n self.get(COMMAND_UIC, 'SetFunc', [('function', function)])", "def on_set(self, callback):\n self._set_callback = callback if callable(callback) else _void", "def on_set(self, callback):\n self._set_callback = callback if callable(callback) else _void", "def set_callback(self, callback):\n if not callable(callback):\n raise TypeError(\"'callback' must be callable\")\n\n self._callback = callback", "def progress_thread_run(func):\n def newfunc(self, *args, **kwargs):\n \"\"\"Wrapper function.\"\"\"\n # pylint: disable=W0212\n try:\n func(self, *args, **kwargs)\n finally:\n gtk.gdk.threads_enter()\n if self._pdialog.top_widget:\n self._pdialog.top_widget.destroy()\n print _(\"Thread done\")\n gtk.gdk.threads_leave()\n return newfunc", "def setProgress(self, progress):\n\t\tself.config.PROGRESS = [progress]", "def set_write_callback(self, attr, cb=None):\n self.write_cbs[attr] = cb", "def setprofile(self, w_func):\n if self.space.is_w(w_func, self.space.w_None):\n self.profilefunc = None\n self.w_profilefuncarg = None\n else:\n self.setllprofile(app_profile_call, w_func)", "def _setProgress(self, progress):\n # print \"Progress set %.2f --------------------------------\" % progress\n\n if progress > 100.0:\n progress = 100.0\n\n self._progress = progress\n self._progressChangedNotifier.notify(self)" ]
[ "0.76210636", "0.73132217", "0.67322624", "0.62459373", "0.62381303", "0.62043667", "0.6173967", "0.5974019", "0.59440875", "0.59214973", "0.5781422", "0.5741869", "0.56839746", "0.5624882", "0.5602486", "0.55898565", "0.55845433", "0.5564059", "0.552527", "0.548637", "0.5454636", "0.542823", "0.5423184", "0.5423184", "0.5395477", "0.53812623", "0.53403527", "0.53376216", "0.53102356", "0.5302797" ]
0.7993551
0
Analyze the data of a task. analyzeproblem(self,whichstream_)
def analyzeproblem(self,whichstream_): res = __library__.MSK_XX_analyzeproblem(self.__nativep,whichstream_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyzeproblem(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.analyzeproblem(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solutionsummary(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.solutionsummary(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def readsummary(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.readsummary(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solutionsummary(self,whichstream_):\n res = __library__.MSK_XX_solutionsummary(self.__nativep,whichstream_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def analyzesolution(self,whichstream_,whichsol_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.analyzesolution(whichstream_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def optimizersummary(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.optimizersummary(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def analyze(self, program: ghidra.program.model.listing.Program) -> None:\n ...", "def readsummary(self,whichstream_):\n res = __library__.MSK_XX_readsummary(self.__nativep,whichstream_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def analyse_per_task(self, task_id=None):\n per_task = self.chipdata.cast(\n self.chipdata.get_var_strict('$_first_scratch_mem').address,\n 'scratch_per_task_data'\n )\n matching_id = False\n for sc_table in self.parse_linked_list(per_task.address, 'next'):\n if (task_id is None) or (sc_table.value[0] is task_id):\n self.formatter.output(str(sc_table))\n matching_id = True\n if (task_id is not None) and (not matching_id):\n self.formatter.output(\n 'There is no task id = ' + str(cu.hex(task_id)) + '!'\n )", "def analyse(self, data=None):\n pass", "def onesolutionsummary(self,whichstream_,whichsol_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.onesolutionsummary(whichstream_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __do_analysis(self):\n #Step 1: connect to mongodb and pick a streamer\n dbclient = db_connect.DBClient()\n streamer_data = dbclient.analyze_number_of_stream_viewers(self.streamer)\n streamer_messeges_data = dbclient.analyzeStream(self.streamer)\n\n timearr = []\n messagesarr = []\n streamer_timearr = []\n num_chattersarr = []\n\n #create time and messages array for plotting purposes\n for entry in streamer_messeges_data:\n timearr.append(entry['start_time'])\n messagesarr.append(entry['messeges_count'] * entry['messeges_count'])\n #print(entry['start_time'])\n\n #create time 
and chatters array for plotting purposes\n for entry in streamer_data:\n streamer_timearr.append(entry['deltatime_from_start_of_clip'])\n num_chattersarr.append(entry['num_viewers'])\n\n # print('start time: ' + str(timearr[0]))\n # print('end time: ' + str(timearr[-1]))\n # print('duration: ' + str(timearr[-1] - timearr[0]))\n # print('average views/min = ' + str(sum(messagesarr) / len(messagesarr)))\n\n average_message_count = sum(messagesarr) / len(messagesarr)\n\n averagearr = []\n plotting_time_arr = []\n labelarr = []\n\n for i in range(len(timearr)):\n averagearr.append(average_message_count*1.8)\n #print(str(timearr[i]) + ' converts to ' + str(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i]))\n plotting_time_arr.append(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i])\n labelarr.append(str(i))\n\n plotting_streamer_timearr = []\n for i in range(len(streamer_timearr)):\n plotting_streamer_timearr.append(datetime.datetime(2020, 1, 1, 0, 0) + streamer_timearr[i])\n\n #plot messages and cuttoff\n messeges_over_time_fig = pyplot.figure(1)\n messeges_over_time_fig.set_figheight(15)\n messeges_over_time_fig.set_figwidth(30)\n messeges_over_time_fig.suptitle(self.streamer + \"'s video data\")\n messeges_over_time_sub = messeges_over_time_fig.add_subplot(211)\n\n pyplot.plot(plotting_time_arr,messagesarr,label='messages/min')\n dots = pyplot.plot(plotting_time_arr,messagesarr,'bo',label='messages/min')\n\n #label dots\n count = 0\n last_entry_was_above_line = False\n for i in range(len(plotting_time_arr)):\n #print(str(count) +': comparing ' + str(messagesarr[i]) + ' with ' + str(averagearr[i]))\n if(messagesarr[i] > averagearr[i]):\n if(last_entry_was_above_line):\n #Don't increment the count because this is part of the same clip\n count = count\n else:\n #new clip above the line, increment clip count\n count = count + 1\n messeges_over_time_sub.annotate(count,xy=(plotting_time_arr[i],messagesarr[i]))\n last_entry_was_above_line = True\n else:\n last_entry_was_above_line = False\n # messeges_over_time_sub.annotate('NA',xy=(plotting_time_arr[i],messagesarr[i]))\n\n #finish plotting\n pyplot.plot(plotting_time_arr, averagearr,'',label='average')\n pyplot.gcf().autofmt_xdate()\n pyplot.ylabel('Messeges*Messeges')\n pyplot.xlabel('Time')\n\n viewers_over_time_sub = messeges_over_time_fig.add_subplot(212)\n\n pyplot.plot(plotting_streamer_timearr,num_chattersarr,label='num chatters')\n pyplot.ylabel('Chatters')\n pyplot.xlabel('Time')\n\n pyplot.tight_layout()\n pyplot.savefig(output_file_location+self.streamer+'.png')\n print('saved chart to ' + output_file_location+self.streamer+'.png')\n # pyplot.show()\n return average_message_count, streamer_messeges_data", "def Analyze(self, data):\n self._AddResult()", "def run(self, stream):\n pass", "def analyze(data):\n ## Do welch periodogram here\n pass", "def analyzesolution(self,whichstream_,whichsol_):\n res = __library__.MSK_XX_analyzesolution(self.__nativep,whichstream_,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def execute(self, stream):\n pass", "def call(self, task):\n call, args = task[0], task[1:]\n\n if call == codes.SCRAPE:\n return self.scrape(*args)\n if call == codes.ANALYZE:\n return self.analyze(*args)", "def processTask(self):\n #Util.set_color(Util.FOREGROUND_YELLOW | Util.FOREGROUND_INTENSITY)\n #logging.info(\"cmd : %s\", self.ExecutionTask.get_cmd())\n #logging.info(\"param : %s\", self.ExecutionTask.get_param())\n #logging.info(\"ret : %s\", 
str(self.ExecutionTask.get_ret()))\n #logging.info(\"ipport : %s\", self.ExecutionTask.get_ipport())\n #Util.set_color(Util.FOREGROUND_WHITE)\n\n ##############################################################\n # Process for any commands without received messages.....\n ##############################################################\n if self.ExecutionTask.get_cmd() == 'PASS' or self.ExecutionTask.get_cmd() == 'FAIL':\n logging.debug(\"result is %s\", self.ExecutionTask.get_cmd())\n self.setStatus('STOP')\n self.setTestResult(self.ExecutionTask.get_cmd())\n return\n\n if self.ExecutionTask.get_cmd() == 'r_info':\n rinfo_result = self.ExecutionTask.get_param().split('!')\n\n if len(rinfo_result) > 1:\n msg = rinfo_result[1]\n logging.debug(\"%s\", msg)\n\n self.setStatus('STOP')\n self.setTestResult(rinfo_result[0])\n return\n\n if self.ExecutionTask.get_cmd() == 'ResultCheck':\n time.sleep(5)\n self.process_ResultCheck()\n return\n\n if self.ExecutionTask.get_cmd() == 'CheckThroughput':\n time.sleep(5)\n throughputChk = StreamHandler(self.test_mngr_initr)\n chk_result = throughputChk.processStreamResults(self.ExecutionTask.get_param())\n self.setCheckResult(chk_result)\n #if 'FAIL' in chk_result:\n # self.setStatus('STOP')\n return\n\n if self.ExecutionTask.get_cmd() == 'config_multi_subresults':\n self.process_config_multi_subresults()\n return\n\n ##############################################################\n # Process for any commands with received messages......\n ##############################################################\n status = \"\"\n retDict = self.ExecutionTask.get_ret()\n recvStr = \"\"\n if self.ExecutionTask.recv:\n recvStr = self.ExecutionTask.recv.rstrip('\\r\\n')\n #print \"recv : \" + recvStr\n \n if GlobalConfigFiles.curr_prog_name == \"WMMPS\" and \"sniffer_control_subtask\" in self.ExecutionTask.get_cmd():\n logging.debug('In WMMPS, before parsing the recvStr: %s' % recvStr)\n lines = re.split('\\n', recvStr)\n for line in lines:\n if re.search(\"RESULT\", line, re.I):\n if \"FAIL\" in line:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n logging.debug('set test result to FAIL')\n return\n if \"PASS\" in line:\n self.setTestResult('PASS')\n logging.debug('set test result to Pass')\n return\n return\n \n stitems = recvStr.split(',') \n if len(stitems) < 2:\n #logging.debug(\"Bypassing this cmd..\")\n return\n\n status = stitems[1]\n iDNB = TestScriptSymbolTable.get_value_from_sym_tab(\"iDNB\", TestScriptSymbolTable.test_script_sym_tab)\n iINV = TestScriptSymbolTable.get_value_from_sym_tab(\"iINV\", TestScriptSymbolTable.test_script_sym_tab) \n \n if iINV is None:\n iINV = 0\n \n if 'ERROR' in recvStr or 'INVALID' in recvStr and (iDNB == 0 or iDNB is None) and (iINV == 0 or iINV is None):\n #error case...\n logging.debug(\"Return ERROR or INVALID---> STOP process \")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n elif status != 'COMPLETE' and iDNB == 0 and iINV == 0:\n #incomplete case...(running?)\n logging.debug(\"Command %s not completed\", self.ExecutionTask.get_cmd())\n else:\n displayname = \"\"\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport():\n displayname = tbd.displayname\n break\n \n if \"FAIL\" in recvStr and (iINV == 0 or iINV is None):\n if \"SNIFFER\" in displayname or \"sniffer\" in self.ExecutionTask.get_cmd():\n logging.info(\"Test Case Criteria Failure - Command returned FAIL\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n\n elif 
self.ExecutionTask.get_cmd() == 'device_get_info':\n try:\n if displayname == '':\n self.tmsPacket.setDutDeviceInfo(recvStr)\n else:\n self.tmsPacket.setTestbedInfo(displayname, recvStr)\n\n #for validation\n self.setValidationInfo(displayname, recvStr)\n\n except OSError:\n logging.debug(\"exception -- device_get_info capi call\")\n elif self.ExecutionTask.get_cmd() == 'ca_get_version':\n self.setValidationInfo(displayname, recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sniffer_get_info':\n self.setValidationInfo('sniffer', recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sta_associate':\n time.sleep(10)\n\n if len(stitems) > 2:\n retParam = self.ExecutionTask.get_param().split(',')\n streamFlag = \"\"\n if len(retParam) > 4:\n streamFlag = retParam[3]\n\n if stitems[2] == 'streamID':\n streamHndler = StreamHandler(self.test_mngr_initr)\n logging.debug(\"stream config - streamID : %s\", stitems[3])\n if streamFlag == 'send':\n logging.debug(\"traffic config - send : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'send',\n retParam[15], retParam[17], streamHndler.running_phase, streamHndler.RTPCount)\n streamHndler.add_streamInfo(streamPacket)\n streamHndler.RTPCount = streamHndler.RTPCount + 1\n\n elif streamFlag == 'receive':\n logging.debug(\"traffic config - receive : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'receive',\n -1, -1, streamHndler.running_phase, -1)\n streamHndler.add_streamInfo(streamPacket)\n\n else:\n logging.debug(\"traffic config - else : \")\n\n\n\n if retParam[1] == 'Multicast':\n logging.debug(\"----MULTICAST----\")\n streamHndler.multicast = 1\n\n if self.ExecutionTask.get_cmd() != \"traffic_agent_send\":\n ret_val = \"%s\" %(stitems[3].strip())\n logging.debug(\"traffic config - ret_val : %s\", ret_val)\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfacetype':\n ret_val = (\"%s\" %(stitems[5]))\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfaceid':\n ret_val = stitems[3].split('_')[0]\n setRetVal(getRetKey(retDict), ret_val)\n\n elif self.ExecutionTask.get_cmd() == 'traffic_stop_ping':\n\n keyVal = retParam[1]\n #\"%s;%s\"%(retParam[1], self.ExecutionTask.get_ipport())\n setRetVal(keyVal, stitems[5])\n #print(\"%s = %s\" % (retParam[1], stitems[5]))\n pinginternalchk = TestScriptSymbolTable.get_value_from_sym_tab(\"PingInternalChk\", TestScriptSymbolTable.test_script_sym_tab)\n temp_key = getRetKey(self.ExecutionTask.get_ret())\n \n if \"$\" in temp_key:\n sent_reply = temp_key.split(',')\n #print \"SLIM==> ping result save...\"\n #print sent_reply[0]\n #print sent_reply[1]\n setRetVal(sent_reply[0], stitems[3])\n setRetVal(sent_reply[1], stitems[5]) \n\n setRetVal(\"$pingResp\", stitems[5])\n if pinginternalchk == '0':\n logging.debug(\"Ping Internal Check\")\n \n elif stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if len(retDict) > 0:\n tempKey = getRetKey(retDict)\n temp_val = tempKey.split(',')\n count = 0\n item_len = len(stitems)\n for i in temp_val:\n if item_len > count + 3:\n setRetVal(i, stitems[3+count])\n count = count + 2\n\n if self.__status == 'STOP':\n 
logging.debug(\"generate final result if task stops.\")\n #self.generateFinalResult()\n else:\n pass\n #logging.debug(\"Continue---\")\n return", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def task_parse_results():\n pass", "def analyse(self):\n pass", "def test_task1_with_example_input():\n distance = task1(input_stream())\n assert distance == 25", "def sensitivityreport(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.sensitivityreport(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def printdata(self,whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.printdata(whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def runtask(self): \n self.status = 1 #Declare Task as Running.\n \n #Get Ready to Capture Encountered Errors for Mailing\n globs.curErrBlock = ErrorBlock(\"Error Analysis for Task: \"+self.Action) \n\n \"\"\"\n You can practically do anything while a task is running.\n\n Here are some helper functions:\n (Go to these functions for more info)\n 1. runSQLQuery : Executes any sql script.\n 2. find_errors/findErrorsInFiles : Checks a file list for errors and report them.\n 3. Popen : Inbuilt function for executing batch scripts.\n 4. safecopy : copies a file to its destination, reports if file not found.\n\n \"\"\"\n\n if self.op == 1:\n #Task for Gathering Stats\n #Execute Script from the log folder\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n os.chdir(self.schema)\n #The following statement generates a string which contains the absolute path of the sql script and any parameters\n sqlcommand = bytes('@'+globs.props['JDA_HOME']+'\\\\config\\\\database\\\\scpoweb\\\\gather_db_stats '+self.schema, 'utf-8')\n #The following function automatically executes the sqlcommand given above\n runSQLQuery(sqlcommand, globs.props['System_Username'], globs.LogPipe)\n #The following code is used for handling error inside a single file\n log_file = \"\\\\\".join([globs.ARCHIVEFOLDER, self.phase, self.schema, \"gather_db_stats.log\"])\n errFound = find_errors(log_file, [\"ORA-\", \"PLS-\"])\n if errFound:\n self.status = 4\n os.chdir(globs.PROGDIR)\n elif self.op == 2:\n #Task for Counting Rows\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n os.chdir(self.schema)\n sqlcommand = bytes(\"@'%s\\\\sqls\\\\CountRows'\"%globs.PROGDIR+ self.schema, 'utf-8')\n runSQLQuery(sqlcommand, self.schema, sys.__stdout__)\n os.chdir(globs.PROGDIR)\n \n elif self.op == 3:\n #Task for Counting Invalid Objects\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n os.chdir(self.schema)\n sqlcommand = bytes(\"@'%s\\\\sqls\\\\InvalidObjects'\"%globs.PROGDIR+ self.schema, 'utf-8')\n runSQLQuery(sqlcommand, self.schema, sys.__stdout__)\n os.chdir(globs.PROGDIR)\n \n elif self.op == 4:\n #Task for WWFMGR Premigration Script\n progPath = os.getcwd()\n #Store location of the batch scriptfolder\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\platform\\\\migration\\\\'\n #Switch Current Working Directory to the Script Folder\n os.chdir(scriptFolder)\n #Use Popen built-in command to execute required script\n 
#stdout is set to where you want to display the output, LogPipe is our custom console\n session = Popen(['premigrate_webworks.cmd', globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=globs.LogPipe)\n #Wait until Script Finishes Executing\n session.communicate()\n #Move to the Log Folder\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(\"Premigration\")\n #Prepare a list of files that need to be backed up\n BACKUPFILES = ['premigrate.log', 'gen_refschema.log', 'platform_db_creation.log', 'refsch_check.log', 'r_query.log']\n for f in BACKUPFILES:\n #Copy Files one by one\n safecopy(scriptFolder+f, self.schema)\n #Check All Files for Errrors\n if findErrorsInFiles(BACKUPFILES, self):\n self.status = 4\n os.chdir(globs.PROGDIR)\n elif self.op == 5:\n #Task for WWFMGR migration scripts\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\platform\\\\migration\\\\'\n os.chdir(scriptFolder)\n session = Popen(['migrate_webworks.cmd', globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=globs.LogPipe)\n session.communicate()\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(\"Migration\")\n BACKUPFILES = ['migrate_webworks.log', 'platform_db_creation.log', 'gen_refschema.log']\n for f in BACKUPFILES:\n safecopy(scriptFolder+f, self.schema)\n if findErrorsInFiles(BACKUPFILES, self):\n self.status = 4\n os.chdir(globs.PROGDIR)\n\n elif self.op == 6:\n #Task for Monitor Premigration Scripts\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\monitor\\\\migration\\\\'\n os.chdir(scriptFolder)\n session = Popen(['premigrate_monitor.cmd', globs.props['Monitor_Password'], globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=globs.LogPipe)\n session.communicate()\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(\"Premigration\")\n BACKUPFILES = ['premigrate.log', 'platform_db_creation.log', 'gen_refschema.log', 'refsch_check.log']\n for f in BACKUPFILES:\n safecopy(scriptFolder+f, self.schema)\n if findErrorsInFiles(BACKUPFILES, self):\n self.status = 4 \n os.chdir(globs.PROGDIR)\n elif self.op == 7:\n #Task for Monitor Migration Scripts\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\monitor\\\\migration\\\\'\n os.chdir(scriptFolder)\n session = Popen(['migrate_monitor.cmd', globs.props['Monitor_Password'], globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=globs.LogPipe)\n session.communicate()\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(\"Migration\")\n BACKUPFILES = ['migrate_monitor.log', 'platform_db_creation.log', 'gen_refschema.log', 'ema_populate_wwf.log', 'enroll_app_schema.log']\n for f in BACKUPFILES:\n safecopy(scriptFolder+f, self.schema)\n if findErrorsInFiles(BACKUPFILES, self):\n self.status = 4\n os.chdir(globs.PROGDIR)\n \n elif self.op == 13:\n #Task for SCPOMGR Premigration Scripts\n d = globs.saveDir()\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\scpoweb\\\\migration\\\\'\n os.chdir(scriptFolder)\n session = Popen(['premigrate_scpo.cmd', globs.props['SCPO_Password'], globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=sys.__stdout__)\n session.communicate()\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n BACKUPFILES = ['create_scporefschema.log', 'create_wwfrefschema.log', 'grant_manu_privs.log', 
'premigrate_scpo.log', 'show_badrows.log']\n for f in BACKUPFILES:\n safecopy(scriptFolder+f, self.schema)\n found = findErrorsInFiles(BACKUPFILES, self)\n globs.SignalObj.updateErrorSignal.emit(\"Review show_badrows.log in %s before proceeding\"%(\"\\\\\".join([globs.ARCHIVEFOLDER, self.phase, self.schema])))\n self.status = 4\n d.restore()\n elif self.op == 9:\n #Task for SCPOMGR Migration Scripts\n d = globs.saveDir()\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\scpoweb\\\\migration\\\\'\n os.chdir(scriptFolder)\n session = Popen(['migrate_scpo.cmd', globs.props['SCPO_Password'], globs.props['WebWORKS_Password'], globs.props['System_Username'], globs.props['System_Password']], stdin=PIPE, stdout=sys.__stdout__)\n session.communicate()\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n BACKUPFILES = ['create_scporefschema.log', 'create_wwfrefschema.log', 'grant_manu_privs.log', 'migrate_scpo.log']\n for f in BACKUPFILES:\n safecopy(scriptFolder+f, self.schema)\n if findErrorsInFiles(BACKUPFILES, self):\n self.status = 4\n d.restore()\n elif self.op == 10:\n #Task for Checking Row Count Matching\n phase = \"Premigration\"\n predct = getattr(globs,'RowCountDict'+phase)\n phase = \"Postmigration\"\n postdct = getattr(globs,'RowCountDict'+phase)\n res = (predct == postdct)\n if not res:\n globs.SignalObj.updateErrorSignal.emit(\"Row Count Matching Failed!\")\n self.status = 4\n elif self.op == 11:\n #Task for Invalid Object Count Matching\n phase = \"Premigration\"\n predct = getattr(globs,'InvalidCountDict'+phase)\n phase = \"Postmigration\"\n postdct = getattr(globs,'InvalidCountDict'+phase)\n res = (predct == postdct)\n if not res:\n globs.SignalObj.updateErrorSignal.emit(\"Invalid Object Count Matching Failed!\")\n self.status = 4\n elif self.op == 103:\n #Task for Creating Manguistics Package in JDA_SYSTEM\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n user = globs.props['JDA_SYSTEM_Username']\n print(\"Creating the ManugisticsPkg table in the JDA System schema\")\n sqlcommand = bytes('@'+globs.props['JDA_HOME']+'\\\\config\\\\database\\\\platform\\\\ManugisticsPkg '+user, 'utf-8')\n stdout, stdin = runSQLQuery(sqlcommand, user, globs.LogPipe)\n log_file = \"\\\\\".join([globs.ARCHIVEFOLDER, self.phase, \"ManugisticsPkg.log\"])\n errFound = find_errors(log_file, [\"ORA-\", \"PLS-\"])\n if errFound:\n self.status = 4\n os.chdir(globs.PROGDIR)\n elif self.op == 104:\n #Task for Creating ABPP Schema if it doesn't exist \n progPath = os.getcwd()\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\platform\\\\'\n os.chdir(scriptFolder)\n session = Popen(['createAbppSchema.cmd'], stdin=PIPE, stdout=globs.LogPipe)\n session.communicate()\n os.chdir(progPath)\n\n elif self.op == 105:\n #Task for Providing ABPP necessary Grants\n sqlcommand = bytes('@sqls/ABPP_GRANTS', 'utf-8')\n runSQLQuery(sqlcommand, globs.props['System_Username'], globs.LogPipe)\n elif self.op == 106:\n #Task for Updating ABPP Schema\n progPath = os.getcwd()\n scriptFolder = globs.props['JDA_HOME']+'\\\\config\\\\database\\\\platform\\\\'\n os.chdir(scriptFolder)\n session = Popen(['updateAbppSchema.cmd', '-coreServices'], stdout=globs.LogPipe, stdin = PIPE)\n session.communicate()\n os.chdir(progPath)\n elif self.op == 107:\n #Premigration Custom Script\n os.chdir(globs.ARCHIVEFOLDER)\n os.chdir(self.phase)\n sqlcommand = bytes(\"@'%s\\\\sqls\\\\custompremgr'\"%globs.PROGDIR, 'utf-8')\n runSQLQuery(sqlcommand, 'JDA_SYSTEM', sys.__stdout__)\n os.chdir(globs.PROGDIR)\n 
elif self.op == 202:\n #Sample Task Error\n log_file = globs.PROGDIR+'\\\\tmp\\\\sample.log'\n errFound = find_errors(log_file, [\"ORA-\", \"PLS-\"])\n if errFound:\n self.status = 4\n globs.curErrBlock.finalize()", "def analyze_data():\n attack_free_1 = load_messages(\"data/csv/Attack_free_dataset.csv\", verbose=True)\n\n impersonation_1 = load_messages(\"data/csv/170907_impersonation.csv\", verbose=True)\n impersonation_2 = load_messages(\"data/csv/170907_impersonation_2.csv\", verbose=True)\n impersonation_3 = load_messages(\"data/csv/Impersonation_attack_dataset.csv\", verbose=True)\n\n information = {\n \"Mean time between normal messages\":\n get_mean_time_between_normal_messages(attack_free_1),\n \"Mean time between split messages\":\n get_mean_time_between_split_messages(attack_free_1),\n \"Sum of removed intervals in '170907_impersonation.csv'\":\n get_sum_of_removed_intervals(impersonation_1, 250),\n \"Sum of removed intervals in '170907_impersonation_2.csv'\":\n get_sum_of_removed_intervals(impersonation_2, 250),\n \"Sum of removed intervals in 'Impersonation_attack_dataset.csv'\":\n get_sum_of_removed_intervals(impersonation_3, 250),\n \"Index of split in '170907_impersonation.csv'\":\n get_index_before_time(impersonation_1, 250 - 23.434627056121826),\n \"Index of split in '170907_impersonation_2.csv'\":\n get_index_before_time(impersonation_2, 250 - 20.980855226516724),\n \"Index of split in 'Impersonation_attack_dataset.csv'\":\n get_index_before_time(impersonation_3, 250 - 2.1056361198425293)\n }\n\n return information", "def test_task2_with_example_input():\n distance = task2(input_stream())\n assert distance == 286", "def scan():\n print \"Filtering started\"\n #filter new CC & merche\n filterNewOperators()\n\n #add the sample-info to 4_Analysed.csv, with hash, ip, port\n readd_to_toscan()\n\n print \"Scann started\"\n timestampFile = datetime.now()\n\n addHeaderToCSVIfNecessery(trashLog)\n # addHeaderToCSVIfNecessery(activityLog)\n if os.path.isfile(liveAnalysisFile):\n with open(liveAnalysisFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n for target in targetList:\n process = subprocess.Popen(\"sudo nmap -p \" + target['PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" + target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n if err is not None:\n print err\n if \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active: \"+target[\"FILE HASH\"]\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n banner = getBanner(output)\n row.append(banner)\n wr = csv.writer(f)\n wr.writerow(row)\n counter = 0\n with open(targetFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n with open(tempFile, 'w') as f:\n wrTemp = csv.writer(f)\n wrTemp.writerow(['HOST', 'PORT', 'FILE HASH'])\n for target in targetList:\n # TODO: Solve Python problem which doesn't recognise format [command,arg1,arg2]\n process = subprocess.Popen(\"sudo nmap -p \" + target[\n 'PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" +\n target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n\n if \"0 IP addresses\" in output:\n # Means the domain name could not be resolved\n print \"--> Goes to trash\"\n 
addHeaderToCSVIfNecessery(trashFile)\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(trashFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n elif \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active\"\n\n addHeaderToCSVIfNecessery(liveAnalysisFile)\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n wr = csv.writer(f)\n banner = getBanner(output)\n row.append(banner)\n wr.writerow(row)\n if counter < 6:\n with open(liveAnalysisFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n with open(onlineFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow([target['FILE HASH']])\n counter += 1\n else:\n print \"--> to many to analyse, not added!\"\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n else:\n # Means the operator is now not active but could it be later\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n os.remove(targetFile)\n os.rename(tempFile, targetFile)\n if os.path.isfile(trashFile):\n print \"There are hosts in the trash\"\n try:\n host = socket.gethostbyname(\"www.google.com\")\n socket.create_connection((host, 80), 2)\n print \"Connected to internet -- hosts in trash are removed\"\n with open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(trashLog, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([timestampFile, trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n except:\n print \"No internet - the hosts will be replaced in target\"\n with open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(targetFile, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n online()", "def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)" ]
[ "0.7656688", "0.66234916", "0.61678094", "0.61584896", "0.61026996", "0.5949743", "0.58778185", "0.58460075", "0.57688403", "0.5664479", "0.5660007", "0.5589258", "0.55565846", "0.5520233", "0.54727155", "0.5431195", "0.53835535", "0.5372877", "0.5285206", "0.52802324", "0.5272551", "0.52501327", "0.51977205", "0.51694167", "0.5159908", "0.5151219", "0.5140118", "0.5123011", "0.51155347", "0.51143235" ]
0.7232371
1
Analyze the names and issue an error for the first invalid name. analyzenames(self,whichstream_,nametype_)
def analyzenames(self,whichstream_,nametype_):
    res = __library__.MSK_XX_analyzenames(self.__nativep,whichstream_,nametype_)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyzenames(self,whichstream_,nametype_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n if not isinstance(nametype_,nametype): raise TypeError(\"Argument nametype has wrong type\")\n res = self.__obj.analyzenames(whichstream_,nametype_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _check_name(self):\n\t\tpass", "def name_error(name):\n\n if len(name) > MAX_NAME_LENGHT:\n raise InputError(description=\"Name cannot be more than 20 characters long\")", "def check_names(treat, control, error_stream):\n tchrnames = set(treat.get_chr_names())\n cchrnames = set(control.get_chr_names())\n commonnames = tchrnames.intersection(cchrnames)\n if len(commonnames)==0:\n error_stream(\"No common chromosome names can be found from treatment and control!\")\n error_stream(\"Please make sure that the treatment and control alignment files were generated by using the same genome assembly!\")\n error_stream(\"Chromosome names in treatment: %s\" % \",\".join(sorted(tchrnames)))\n error_stream(\"Chromosome names in control: %s\" % \",\".join(sorted(cchrnames)))\n sys.exit()", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def _check_name(\n self,\n node_type: str,\n name: str,\n node: nodes.NodeNG,\n confidence: interfaces.Confidence = interfaces.HIGH,\n disallowed_check_only: bool = False,\n ) -> None:\n\n def _should_exempt_from_invalid_name(node: nodes.NodeNG) -> bool:\n if node_type == \"variable\":\n inferred = utils.safe_infer(node)\n if isinstance(inferred, nodes.ClassDef):\n return True\n return False\n\n if self._name_allowed_by_regex(name=name):\n return\n if self._name_disallowed_by_regex(name=name):\n self.linter.stats.increase_bad_name(node_type, 1)\n self.add_message(\n \"disallowed-name\", node=node, args=name, confidence=interfaces.HIGH\n )\n return\n regexp = self._name_regexps[node_type]\n match = regexp.match(name)\n\n if _is_multi_naming_match(match, node_type, confidence):\n name_group = self._find_name_group(node_type)\n bad_name_group = self._bad_names.setdefault(name_group, {})\n # Ignored because this is checked by the if statement\n warnings = bad_name_group.setdefault(match.lastgroup, []) # type: ignore[union-attr, arg-type]\n warnings.append((node, node_type, name, confidence))\n\n if (\n match is None\n and not disallowed_check_only\n and not _should_exempt_from_invalid_name(node)\n ):\n self._raise_name_warning(None, node, node_type, name, confidence)\n\n # Check TypeVar names for variance suffixes\n if node_type == \"typevar\":\n self._check_typevar(name, node)", "def test_invalid_stream_rename(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n stream = self.subscribe(user_profile, \"stream_name1\")\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n # Check for empty name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"\"})\n self.assert_json_error(result, \"Stream name can't be empty!\")\n # Check for long name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"a\" * 61})\n self.assert_json_error(result, \"Stream name too long (limit: 60 characters).\")\n # Check for Cc characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": 
\"test\\n\\rname\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")\n # Check for Cn characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\uFFFEame\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")", "def verify_name_syntax(sv, name, here, argtext, last):\r\n if name.find(Equal)!=-1: # \"=\" is not allowed in names\r\n print(\"\\n\", Err_equal_in_name, \"\\n\", name) # *** Illegal character in name: \"+ Equal +\" *** \r\n raise ReferenceError\r\n\r\n if not name or here==0: # name may not start with a bracket\r\n print(\"\\n\", Err_empty_name) # *** Syntax error: empty name *** \r\n print(name)\r\n if num>2: # common source of empty name error\r\n print(Help_continuation+Mline+\"' ):\") # you may have meant (with continuation character '\"+Mline):\r\n print(lines[num-3].strip(Space)+Col, Mline, Crlf, name) # suggested correction\r\n raise ReferenceError\r\n\r\n if argtext: # name is a function or a dict\r\n fun=name[:here]\r\n if fun in Internal_Functions: \r\n print(\"\\n\", Err_redef_internal_func) # *** Error: You cannot define an internal function *** \r\n print(fun, \"in\", fun+Obr+argtext+Cbr)\r\n raise ReferenceError\r\n \r\n if name[last:]: # name must end with closing bracket after args\r\n print(\"\\n\", Err_text_after_args) # *** Syntax error: text found after arguments *** \r\n print(name)\r\n raise ReferenceError", "def check_filename(name, fileinfos): \n try:\n if not name in fileinfos.keys():\n raise ValueError(\"Error: The XML file could not be found.\")\n except ValueError as err:\n print(err)\n exit(1)", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def test_named_entities(self) -> None:\n for named_entitity_rule in self.rules.named_entities:\n identity: str = named_entitity_rule[\"identity\"]\n type: Optional[str] = named_entitity_rule.get(\"type\")\n subtype: Optional[str] = named_entitity_rule.get(\"subtype\")\n invalid: Optional[str] = named_entitity_rule.get(\"invalid\")\n valid: Optional[str] = named_entitity_rule.get(\"valid\")\n\n for named_entity in self.report.get_named_entities(identity, type, subtype):\n text: str = \" \".join([w.text for w in named_entity.words])\n if valid and (not re.search(valid, text, re.I)):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )\n elif invalid and re.search(invalid, text, re.I):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )", "def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, 
font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None", "def raiseNameError(text):\n pattern = re.compile(\"[a-zA-Z]\")\n if not pattern.match(text):\n raise Exception(\"Invalid Name Entered\")", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def check_imported_name(self, name, field, sourcepath):\n if len(name) > 80:\n raise NameFormatError(\"ERROR: %s: %s name too long: %s\"\n % (sourcepath, field, name))", "def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)", "def check_funny_chars_in_names(names, is_full_qualified_name=True):\n if names and len(names) > 0:\n for name in names:\n if ('\\t' in name or '\\n' in name or '!' in name or ',' in name or\n (is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):\n raise Exception('Name has an invalid character \"\\\\t\" \"\\\\n\" \"!\" \",\" \".\": \"%s\"' % name)", "def isAddName(name):\t\n if lib.essentials.isAlphanumeric(name) != 0:\n\tprint \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name)\n #output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name))) \n return -1\n \n if lib.essentials.isStartNumeric(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)\n\t#output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)))\n return -1\n\n if lib.essentials.isContainSpecial(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)\n\t#output.completeOutputError(InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)))\n return -1\n\n# if lib.db.db.ifExistsInDatabase(name) == 0:\n#\tprint NameError(\"'%s' is not valid name. \\n Already Exists\" % (name))\n#\treturn -1\n \n return 0", "def name_check(f_name):\r\n if len(f_name) == 0:\r\n print('The first name must be filled in.')\r\n if len(f_name) < 2:\r\n print(f_name + ' is not a valid name. 
Itis too short.')", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def parse_input_topgro_names( name ):\n\n #Check whether we're working with a list or prefix\n if not os.path.isfile(name[0]):\n #If the first entry is not a name, then it is probably a prefix\n names = (name + '.top', name + '.gro')\n for n in names:\n assert os.path.isfile(n), \"No such input file %s...\" % n\n return names \n else:\n names = name\n for n in names:\n assert os.path.isfile(n), \"No such input file %s...\" % n\n\n return names", "def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']", "def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)", "def find_new_name(self, std, name):\n all_names = [case.read_name() for case in std.get_all(aster_s.Case)]\n new_name = name\n for idx in xrange(100):\n if new_name not in all_names:\n return new_name\n new_name = name + str(idx)\n else:\n mod.launch(ERROR, \"Too many wizards '%s' in use\" % name)", "def analyzeproblem(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.analyzeproblem(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __readName(self, bstream):\r\n raise NotImplementedError(self.__class__.__name__)\r\n pass", "def _validate_names(mapping: Mapping[str, Any],\n ref: str) -> Tuple[Set[str], List[SchemaError]]:\n errs = [] # type: List[SchemaError]\n\n names = {mapping['name']}\n\n if 'classes' in mapping:\n for i, obj in enumerate(mapping['classes']):\n name = obj['name']\n if name in names:\n errs.append(\n SchemaError(\n message=\"Duplicate names: {!r}\".format(name),\n ref=\"{}/classes/{}/name\".format(ref, i)))\n\n names.add(name)\n\n if 'embeds' in mapping:\n for i, obj in enumerate(mapping['embeds']):\n name = obj['name']\n if name in names:\n errs.append(\n SchemaError(\n message=\"Duplicate names: {!r}\".format(name),\n ref=\"{}/embeds/{}/name\".format(ref, i)))\n\n names.add(name)\n\n return names, errs", "def verif_similar_names(sv):\r\n ok=True\r\n names=[os.path.normcase(n) for n in sv.Object_list] # list names without case\r\n names.sort() # facilitate compare one to the next\r\n for i, n in enumerate(names[:-1]): # scan whole list\r\n a,b=n[:-1], names[i+1][:-1] # names minus last char\r\n c=names[i+1][-1] # last char in full name\r\n d=n[-1] # last char in 
full name\r\n if len(a)>1 and (c <\"0\" or c>\"9\") and (d <\"0\" or d>\"9\") and a[-1]!=Underscore and b in [a, n]:\r\n if ok:\r\n print(\"\")\r\n ok=False\r\n warn(\"\\n\"+Warn_typing_risk+\"\\n'\"+n+\"' / '\"+names[i+1]+\"'\") # *** Warning: risk of typing error in '\"+n+\"' or '\"+names[i+1]+\"' *** \r\n \r\n if not ok: print(\"\")", "def check_name(self, name):\n status, msg = utils.validate_name(name, \"36\", \"storageview name\")\n if not status:\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n else:\n LOG.info(msg)" ]
[ "0.856169", "0.6125964", "0.5851493", "0.5766484", "0.5649188", "0.5622006", "0.552477", "0.55118906", "0.55098695", "0.5505015", "0.55004114", "0.54994583", "0.5494788", "0.5492459", "0.5460644", "0.5454948", "0.54511714", "0.544296", "0.5436722", "0.5407702", "0.53509", "0.5346151", "0.5327351", "0.53130955", "0.53126013", "0.5311534", "0.5298277", "0.52932835", "0.5292966", "0.528761" ]
0.7843094
1
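The row above pairs the `analyzenames` docstring with its wrapper implementation, which simply forwards to the native library and raises on a non-zero result code. A minimal usage sketch, assuming the MOSEK Python package is available; the input file name, stream handler, and name-type choice are illustrative assumptions rather than values taken from the dataset:

import sys
import mosek

def stream_printer(text):
    # Forward MOSEK log output to stdout so the name diagnostics are visible.
    sys.stdout.write(text)

with mosek.Env() as env, env.Task(0, 0) as task:
    task.set_Stream(mosek.streamtype.log, stream_printer)
    task.readdata("problem.lp")  # placeholder problem file
    # Report names that would be invalid when writing the problem in LP format.
    task.analyzenames(mosek.streamtype.log, mosek.nametype.lp)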
Print information related to the quality of the solution. analyzesolution(self,whichstream_,whichsol_)
def analyzesolution(self,whichstream_,whichsol_):
    res = __library__.MSK_XX_analyzesolution(self.__nativep,whichstream_,whichsol_)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyzesolution(self,whichstream_,whichsol_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.analyzesolution(whichstream_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def onesolutionsummary(self,whichstream_,whichsol_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.onesolutionsummary(whichstream_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def onesolutionsummary(self,whichstream_,whichsol_):\n res = __library__.MSK_XX_onesolutionsummary(self.__nativep,whichstream_,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solutionsummary(self,whichstream_):\n res = __library__.MSK_XX_solutionsummary(self.__nativep,whichstream_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solutionsummary(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.solutionsummary(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def print_solution():\n pass", "def solve(self):\n print(\"Problem %s Answer: %s\" % (self.number, self.solution()))", "def print_solution(self, solution):\n if self._background is None:\n bg_weights = solution[0 : self.nprimaries]\n mod_weights = solution[self.nprimaries : self.nprimaries * 2]\n else:\n bg_weights = self._background\n mod_weights = solution\n\n print(f\"Background spectrum: {self.w2s(bg_weights)}\")\n print(f\"Modulation spectrum: {self.w2s(mod_weights)}\")", "def show_solution(self,show):\r\n self.showSolution = show", "def _explain(self, solution):\n all_true = self.implied_true.union(self.answered_true).union(self.current_subgraph)\n\n # recalculate all data\n self.data_graph = self._initialise_data()\n\n # get the nodes that were not used\n unused = all_true.symmetric_difference(self.data_graph.nodes)\n\n # remove the unused nodes from graph\n self.data_graph.remove_nodes(unused)\n\n # print the remaining graph:\n print(\"Řešení bylo odvozeno od následujícího průchodu grafem: \")\n self.data_graph.graphviz_draw(\"Solution to:\", solution.name)\n self.data_graph.print_nice()", "def printSolution(self):\n print \"----- Solution -----\"\n for feature in self.features:\n print \"Name = \" + feature.name + \" Value = \" + str(feature.value)", "def print_solution(self, solution_path):\n print(\"---SOLUTION---: \")\n for node in solution_path:\n node.state.plot_cube(\n \"SOLUTION: Node [\" + str(node.id) + \"] at depth \" + str(node.node_depth)\n )\n if node.last_action != None:\n print(\"Next action: \", node.last_action)\n print(\"[\" + str(node.id) + \"] \" + str(node.state.create_md5()))\n\n print(\"\\n TOTAL COST: \", solution_path[len(solution_path) - 1].cost)", "def print_result(solution, states_expanded, max_fringe):\n if solution is None: \n print(\"No solution found.\")\n else: \n print(\"Solution has {} actions.\".format(len(solution)))\n print(\"Total states expanded: {}.\".format(states_expanded))\n print(\"Max fringe size: {}.\".format(max_fringe))", "def 
print_synthesis_details(self):\n super(KBModelM2, self).print_synthesis_details()\n self.logger.debug(f\"{self.num_facts_violating_functionality} facts violated functionality\")\n self.logger.debug(f\"{self.num_facts_violating_inverse_functionality} facts violated inverse functionality\")\n self.logger.debug(f\"{self.num_facts_violating_non_reflexiveness} facts violated non-reflexiveness\")", "def readsolution(self,whichsol_,filename_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.readsolution(whichsol_,filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def analyzeproblem(self,whichstream_):\n res = __library__.MSK_XX_analyzeproblem(self.__nativep,whichstream_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def writesolution(self,whichsol_,filename_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.writesolution(whichsol_,filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def sensitivityreport(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.sensitivityreport(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def reporting(self, sol):\r\n if self.cond == True:\r\n time = python_time.clock()\r\n dt = time - self._reporting_statics.time\r\n\r\n def rp(txt):\r\n print(\"ht3_solver:\\t\" + txt)\r\n\r\n if self._reporting_statics.last_report - time < 0:\r\n rp(\"Completed step \" + str(self.step - 1) + \" in \" \\\r\n + str(dt) + \" s.\")\r\n steps_rem = (self.max_T - self.current_T) / self.d_T\r\n completion = 1 - steps_rem / (self.step + steps_rem)\r\n rp(str(int(completion * 100)) + \"% complete.\")\r\n more_steps = np.ceil((self.max_T - self.current_T) / self.d_T)\r\n more_time = more_steps * dt\r\n exp_fin = python_time.asctime(python_time.localtime(\r\n python_time.time() + int(more_time)))\r\n rp(\"Expected completion is \" + exp_fin)\r\n print(\"\\n\")\r\n rp(\"Starting step \" + str(self.step) + \".\")\r\n self._reporting_statics.last_report = time\r\n self._reporting_statics.time = time", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def analyzeproblem(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.analyzeproblem(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def show_summary(self, out = None, debug = False):\n if (out is None) : out = sys.stdout\n results = self.matching_candidates\n if (len(results) > 0):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n if (self.no_final):\n print(\" Found potential ion%s outside of specified set:\" % \\\n (\"s\" if len(results) > 1 else \"\"), file=out)\n if (self.final_choice is not None):\n # We have one result that we are reasonably certain of\n elem_params, score = results[0]\n if elem_params.element not in mmtbx.ions.HALIDES:\n 
self.atom_props.show_ion_results(\n identity = str(self.final_choice),\n out = out,\n valence_used = self.valence_used,\n confirmed = True)\n else:\n print(\" Probable anion:\", str(elem_params), file=out)\n print(\"\", file=out)\n elif (len(results) > 1):\n # We have a couple possible identities for the atom\n below_cutoff = [ elem_params for elem_params, score in results\n if score < self.ambiguous_valence_cutoff]\n if len(below_cutoff) == 1:\n elem_params = below_cutoff[0]\n print(\" ambigous results, best valence from %s\" % \\\n str(elem_params), file=out)\n self.atom_props.show_ion_results(\n identity = str(elem_params),\n out = out,\n valence_used = True)\n print(\"\", file=out)\n else:\n ions = [str(i[0]) for i in sorted(results, key = lambda x: x[1])]\n print(\" ambiguous results, could be %s\" % \", \".join(ions), file=out)\n for elem_params, score in results :\n self.atom_props.show_ion_results(identity = str(elem_params),\n out = out)\n print(\"\", file=out)\n else:\n if (self.atom_type != WATER) or (self.nuc_phosphate_site):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n # try anions now\n if (self.looks_like_halide):\n print(\" Probable cation: %s\" % str(self.final_choice), file=out)\n print(\"\", file=out)\n else:\n # atom is definitely not water, but no reasonable candidates found\n # print out why all the metals we tried failed\n if (debug) and (len(self.filtered_candidates) > 0):\n print(\" insufficient data to identify atom\", file=out)\n possible = True\n for params in self.filtered_candidates:\n if (self.atom_props.has_compatible_ligands(str(params))):\n if possible:\n print(\" possible candidates:\", file=out)\n possible = False\n self.atom_props.show_ion_results(identity = str(params),\n out = out)\n else :\n print(\" incompatible ligands for %s\" % str(params), file=out)\n #print >> out, \" rejected as unsuitable:\"\n #for params in self.rejected_candidates:\n # if (self.atom_props.has_compatible_ligands(str(params))):\n # self.atom_props.show_ion_results(identity = str(params),\n # out = out)\n # else :\n # print >> out, \" incompatible ligands for %s\" % str(params)\n print(\"\", file=out)", "def info(self):\n\n print(\"pupil file =\", self.pupil_file)\n print(\"phase file =\", self.phase_file)\n print(\"wavelengths and weights =\")\n for i in range(len(self.filter[0])):\n print(\" %10.5f %6.4f\" % (self.filter[0][i], self.filter[1][i]))\n print(\"pupil diameter (meters) =\", self.D)\n if self.oversample == 2:\n print(\"oversampling factor = 2 (Nyquist sampling)\")\n else:\n r = float(self.oversample) / 2.\n print(\"oversampling factor = %d (%g * Nyquist sampling)\" % \\\n (self.oversample, r))\n if self.type == SINGLE_PREC:\n print(\"computations will use single precision\")\n else:\n print(\"computations will use double precision\")\n print(\"size of output image =\", self.output_size)\n if self.cdelt is not None:\n print(\"output pixel size (arcsec) =\", self.cdelt / ARCSECtoDEGREES)\n if self.output_written:\n print(\"The computed PSF has been written to the output file.\")\n else:\n print(\"The output file has not been written yet.\")", "def PrintSolution(self):\n sol = \"\"\n charMap = {\n Magnets.EMPTY: '.',\n Magnets.PLUS: '+',\n Magnets.MINUS: '-',\n }\n for row in self.Solution():\n for space in row:\n sol = sol + charMap.get(space, '?')\n sol = sol + '\\n'\n return sol", "def OnSolutionCallback(self):\n self.total_plans += 1\n 
print('Feasible Project Plan #{c}:'.format(c=self.total_plans))\n for idx in range(0, len(self.p_)):\n if self.Value(self.p_vars_[idx]):\n print(' - Project ID: {p} (Cost={c}, Value={v})'.format(\n p=(idx + 1), c=self.p_[idx][4], v=self.p_[idx][3]))\n print(' - Total Cost : {c}'.format(c=self.Value(self.total_cost_)))\n print(' - Total Value : {v}'.format(v=self.Value(self.total_value_)))", "def printSolutions(self):\n\t\tprint \"Computing solutions...\"\n\t\t\n\t\tsolutions = self.problem.getSolutions()\n\t\tnumberOfSolutions = len(solutions)\n\t\t\n\t\tfor i, solution in enumerate(solutions):\n\t\t\titems = solution.items()\n\t\t\t# sort by time\n\t\t\titems.sort(lambda a, b: cmp(a[1], b[1]))\n\t\t\t# sort by order\n\t\t\titems.sort(lambda a, b: cmp(a[0][0], b[0][0]))\n\t\t\t\n\t\t\tprint \"Solution number\", i + 1\n\t\t\t\n\t\t\ti = 1\n\t\t\tfor j in items:\n\t\t\t\tif j[0][0:1] != str(i):\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n\",\n\t\t\t\t\tprint \"Order no:\", i\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\t\ti += 1\n\t\t\tprint \"\\n==============================================\\n\",\n\t\tprint \"Number of solutions:\", numberOfSolutions\n\t\treturn solutions, numberOfSolutions", "def readsolution(self,whichsol_,filename_):\n if isinstance(filename_,unicode):\n filename_ = filename_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_readsolution(self.__nativep,whichsol_,filename_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return" ]
[ "0.75133455", "0.73406655", "0.73010904", "0.71412873", "0.6950169", "0.6524369", "0.6344713", "0.6188728", "0.6182085", "0.61371076", "0.6001918", "0.59650385", "0.58990616", "0.5842757", "0.5792607", "0.57907444", "0.5786922", "0.5766608", "0.57455885", "0.56941146", "0.56935096", "0.5609289", "0.56085026", "0.56007594", "0.55987173", "0.55852824", "0.55821395", "0.5547863", "0.55306405", "0.5501128" ]
0.7367534
1
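In the same spirit, a rough sketch of calling `analyzesolution` once an optimization has finished; `soltype.bas` is an illustrative choice and assumes a basic solution is actually defined for the problem at hand:

import sys
import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.set_Stream(mosek.streamtype.log, sys.stdout.write)
    task.readdata("problem.lp")  # placeholder problem file
    task.optimize()
    # Print violation and quality details for the basic solution to the log stream.
    task.analyzesolution(mosek.streamtype.log, mosek.soltype.bas)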
Prepare a task for basis solver. initbasissolve(self,basis_)
def initbasissolve(self,basis_):
    _basis_minlength = self.getnumcon()
    if self.getnumcon() > 0 and basis_ is not None and len(basis_) != self.getnumcon():
        raise ValueError("Array argument basis is not long enough: Is %d, expected %d" % (len(basis_),self.getnumcon()))
    if isinstance(basis_,numpy.ndarray) and not basis_.flags.writeable:
        raise ValueError("Argument basis must be writable")
    if isinstance(basis_, numpy.ndarray) and basis_.dtype is numpy.dtype(numpy.int32) and basis_.flags.contiguous:
        _basis_copyarray = False
        _basis_tmp = ctypes.cast(basis_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    elif basis_ is not None:
        _basis_copyarray = True
        _basis_np_tmp = numpy.zeros(len(basis_),numpy.dtype(numpy.int32))
        _basis_np_tmp[:] = basis_
        assert _basis_np_tmp.flags.contiguous
        _basis_tmp = ctypes.cast(_basis_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    else:
        _basis_copyarray = False
        _basis_tmp = None
    res = __library__.MSK_XX_initbasissolve(self.__nativep,_basis_tmp)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    if _basis_copyarray:
        basis_[:] = _basis_np_tmp
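The wrapper above validates the `basis_` array (length, writability, dtype), copies it into a contiguous int32 buffer when needed, calls the native routine, and copies the resulting basis ordering back. A rough usage sketch, assuming `task` already holds a problem that was optimized with the simplex optimizer so that a basic solution exists:

import numpy

numcon = task.getnumcon()
basis = numpy.zeros(numcon, dtype=numpy.int32)

# Fill `basis` with the ordering of the variables in the current basis matrix B.
task.initbasissolve(basis)

# `basis` can now be used together with solvewithbasis to solve linear
# systems involving B, e.g. B*x = b.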
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initbasissolve(self,basis): # 3\n _copyback_basis = False\n if basis is None:\n basis_ = None\n else:\n try:\n basis_ = memoryview(basis)\n except TypeError:\n try:\n _tmparr_basis = array.array(\"i\",basis)\n except TypeError:\n raise TypeError(\"Argument basis has wrong type\")\n else:\n basis_ = memoryview(_tmparr_basis)\n _copyback_basis = True\n else:\n if basis_.format != \"i\":\n basis_ = memoryview(array.array(\"i\",basis))\n _copyback_basis = True\n if basis_ is not None and len(basis_) != self.getnumcon():\n raise ValueError(\"Array argument basis has wrong length\")\n res = self.__obj.initbasissolve(basis_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_basis:\n basis[:] = _tmparr_basis", "def prepare(self) -> None:\n\n \"\"\"\n Objective function\n Coefficient -2 means that we solve maximization problem (multiple all \n value to -1) and also there are left coverage area and right coverage \n area for each station (2* cov)\n \"\"\"\n\n f = [-2 * self.cov[i] for i in range(self.get_column_num)]\n self._f = np.array(f)\n\n \"\"\" Inequality Constraints\"\"\"\n ineq_cost = [self.cost[i] for i in range(self.get_column_num)]\n self._ineq_constraints = np.array(ineq_cost)\n self._b = np.array(self.cost_limit)\n\n \"\"\" \n There is no equality constraints. \n self._eq_constraints is empty\n self._beq is empty\n \"\"\"", "def task_init(self, param1):\n raise NotImplementedError", "def _set_solver(self):\n self.solver = Solver.select_solver(self.method, self.solver_args)\n if self.method.lower()==\"elastic-net\":\n self.solver.elements=self.basis.elements", "def prepare_so_task(self, subtask_index=1):\n #First clear the task list, to ensure that no tasks are set to run\n self._clear_tasks()\n \n #Next, go to the sensitivities task and set the appropriate variables\n sensTask = self._getTask('sensitivities')\n problem = sensTask.find(xmlns + 'Problem')\n #And open the listofvariables\n for pG in problem:\n if (pG.attrib['name'] == 'ListOfVariables'):\n listOfVariables = pG\n assert listOfVariables != None\n \n #Reset the listOfVariables, and add the appropriate objects\n listOfVariables.clear()\n listOfVariables.set('name', 'ListOfVariables')\n\n #Add a new child element: <ParameterGroup name='Variables'>\n variables = etree.SubElement(listOfVariables, xmlns + 'ParameterGroup')\n variables.set('name', 'Variables')\n\n #Add two new children to variables:\n #<Parameter name='SingleObject')\n singleObject = etree.SubElement(variables, xmlns + 'Parameter')\n singleObject.set('name', 'SingleObject')\n singleObject.set('type', 'cn')\n #<Parameter name='ObjectListType'>\n objectListType = etree.SubElement(variables, xmlns + 'Parameter')\n objectListType.set('name', 'ObjectListType')\n objectListType.set('type', 'unsignedInteger')\n objectListType.set('value', '1')\n \n ############\n \n #Next, load the optimization task\n optTask = self._getTask('optimization')\n #And set it scheduled to run, and to update the model\n optTask.attrib['scheduled'] = 'true'\n optTask.attrib['updateModel'] = 'true'\n \n #Find the objective function we wish to change\n problemParameters = optTask.find(xmlns + 'Problem')\n for parameter in problemParameters:\n if (parameter.attrib['name'] == 'ObjectiveExpression'):\n objectiveFunction = parameter\n \n if (parameter.attrib['name'] == 'Maximize'):\n maximizeParameter = parameter\n \n #Set the subtask to sensitivities\n #TODO: At some point allow for other subtasks\n if (parameter.attrib['name'] == 
'Subtask'):\n parameter.attrib['value'] = 'CN=Root,Vector=TaskList[Sensitivities]'\n\n assert objectiveFunction != None\n assert maximizeParameter != None\n\n #Set the appropriate objective function for the optimization task:\n objectiveFunction.text = '<CN=Root,Vector=TaskList[Sensitivities],Problem=Sensitivities,Array=Scaled sensitivities array[.]>'\n \n ############\n #Create a new report for the optimization task\n report_key = 'condor_copasi_sensitivity_optimization_report'\n self._create_report('SO', report_key, report_key)\n \n #And set the new report for the optimization task\n report = optTask.find(xmlns + 'Report')\n \n #If no report has yet been set, report == None. Therefore, create new report\n if report == None:\n report = etree.Element(xmlns + 'Report')\n optTask.insert(0,report)\n \n report.set('reference', report_key)\n report.set('append', '1')\n \n \n #############\n #get the list of strings to optimize\n #self.get_optimization_parameters(friendly=False) returns a tuple containing the parameter name as the first element\n optimizationStrings = []\n for parameter in self.get_optimization_parameters(friendly=False):\n optimizationStrings.append(parameter[0])\n \n #Build the new xml files and save them\n i = 0\n file_list = []\n for optString in optimizationStrings:\n maximizeParameter.attrib['value'] = '1'\n output = 'output_%d.%d.txt' % (subtask_index, i)\n report.attrib['target'] = output\n \n #Update the sensitivities object\n singleObject.set('value',optString)\n \n target = os.path.join(self.path, 'auto_copasi_%d.%d.cps' %(subtask_index, i))\n \n self.write(target)\n file_list.append(target)\n \n maximizeParameter.attrib['value'] = '0'\n output = 'output_%d.%d.txt' % (subtask_index, i + 1)\n report.attrib['target'] = output\n \n target = os.path.join(self.path, 'auto_copasi_%d.%d.cps' % (subtask_index, i+1))\n self.write(target)\n file_list.append(target)\n i = i + 2\n \n return file_list", "def __init__(self, project=None):\n HyppopySolver.__init__(self, project)", "def test_solve_task(self):\n pass", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def initPopulation(self, task):\n\t\tSol, Fitness, d = Algorithm.initPopulation(self, task)\n\t\tA, S, Q, v = np.full(self.NP, self.A), np.full([self.NP, task.D], 0.0), np.full(self.NP, 0.0), np.full([self.NP, task.D], 0.0)\n\t\td.update({'A': A, 'S': S, 'Q': Q, 'v': v})\n\t\treturn Sol, Fitness, d", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: 
Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def __init__(\n self,\n biorbd_model,\n dynamics_type,\n number_shooting_points,\n phase_time,\n X_init,\n U_init,\n X_bounds,\n U_bounds,\n objective_functions=ObjectiveList(),\n constraints=ConstraintList(),\n parameters=ParameterList(),\n external_forces=(),\n ode_solver=OdeSolver.RK,\n nb_integration_steps=5,\n control_type=ControlType.CONSTANT,\n all_generalized_mapping=None,\n q_mapping=None,\n q_dot_mapping=None,\n tau_mapping=None,\n plot_mappings=None,\n state_transitions=StateTransitionList(),\n nb_threads=1,\n use_SX=False,\n ):\n\n if isinstance(biorbd_model, str):\n biorbd_model = [biorbd.Model(biorbd_model)]\n elif isinstance(biorbd_model, biorbd.biorbd.Model):\n biorbd_model = [biorbd_model]\n elif isinstance(biorbd_model, (list, tuple)):\n biorbd_model = [biorbd.Model(m) if isinstance(m, str) else m for m in biorbd_model]\n else:\n raise RuntimeError(\"biorbd_model must either be a string or an instance of biorbd.Model()\")\n self.version = {\"casadi\": casadi.__version__, \"biorbd\": biorbd.__version__, \"biorbd_optim\": __version__}\n self.nb_phases = len(biorbd_model)\n\n biorbd_model_path = [m.path().relativePath().to_string() for m in biorbd_model]\n self.original_values = {\n \"biorbd_model\": biorbd_model_path,\n \"dynamics_type\": dynamics_type,\n \"number_shooting_points\": number_shooting_points,\n \"phase_time\": phase_time,\n \"X_init\": X_init,\n \"U_init\": U_init,\n \"X_bounds\": X_bounds,\n \"U_bounds\": U_bounds,\n \"objective_functions\": ObjectiveList(),\n \"constraints\": ConstraintList(),\n \"parameters\": ParameterList(),\n \"external_forces\": external_forces,\n \"ode_solver\": ode_solver,\n \"nb_integration_steps\": nb_integration_steps,\n \"control_type\": control_type,\n \"all_generalized_mapping\": all_generalized_mapping,\n \"q_mapping\": q_mapping,\n \"q_dot_mapping\": q_dot_mapping,\n \"tau_mapping\": tau_mapping,\n \"plot_mappings\": plot_mappings,\n \"state_transitions\": state_transitions,\n \"nb_threads\": nb_threads,\n \"use_SX\": use_SX,\n }\n\n # Check integrity of arguments\n if not isinstance(nb_threads, int) or isinstance(nb_threads, bool) or nb_threads < 1:\n raise RuntimeError(\"nb_threads should be a positive integer greater or equal than 1\")\n\n if isinstance(dynamics_type, DynamicsTypeOption):\n dynamics_type_tp = DynamicsTypeList()\n dynamics_type_tp.add(dynamics_type)\n dynamics_type = dynamics_type_tp\n elif not isinstance(dynamics_type, DynamicsTypeList):\n raise RuntimeError(\"dynamics_type should be a DynamicsTypeOption or a DynamicsTypeList\")\n\n ns = number_shooting_points\n if not isinstance(ns, int) or ns < 2:\n if isinstance(ns, (tuple, list)):\n if sum([True for i in ns if not isinstance(i, int) and not isinstance(i, bool)]) != 0:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n else:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n nstep = nb_integration_steps\n if not isinstance(nstep, int) or isinstance(nstep, bool) or nstep < 1:\n raise RuntimeError(\"nb_integration_steps should be a positive integer greater or equal than 1\")\n\n if not isinstance(phase_time, (int, float)):\n if isinstance(phase_time, (tuple, list)):\n if sum([True 
for i in phase_time if not isinstance(i, (int, float))]) != 0:\n raise RuntimeError(\"phase_time should be a number or a list of number\")\n else:\n raise RuntimeError(\"phase_time should be a number or a list of number\")\n\n if isinstance(X_init, InitialConditionsOption):\n X_init_tp = InitialConditionsList()\n X_init_tp.add(X_init)\n X_init = X_init_tp\n elif not isinstance(X_init, InitialConditionsList):\n raise RuntimeError(\"X_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(U_init, InitialConditionsOption):\n U_init_tp = InitialConditionsList()\n U_init_tp.add(U_init)\n U_init = U_init_tp\n elif not isinstance(U_init, InitialConditionsList):\n raise RuntimeError(\"U_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(X_bounds, BoundsOption):\n X_bounds_tp = BoundsList()\n X_bounds_tp.add(X_bounds)\n X_bounds = X_bounds_tp\n elif not isinstance(X_bounds, BoundsList):\n raise RuntimeError(\"X_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(U_bounds, BoundsOption):\n U_bounds_tp = BoundsList()\n U_bounds_tp.add(U_bounds)\n U_bounds = U_bounds_tp\n elif not isinstance(U_bounds, BoundsList):\n raise RuntimeError(\"U_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(objective_functions, ObjectiveOption):\n objective_functions_tp = ObjectiveList()\n objective_functions_tp.add(objective_functions)\n objective_functions = objective_functions_tp\n elif not isinstance(objective_functions, ObjectiveList):\n raise RuntimeError(\"objective_functions should be built from an ObjectiveOption or ObjectiveList\")\n\n if isinstance(constraints, ConstraintOption):\n constraints_tp = ConstraintList()\n constraints_tp.add(constraints)\n constraints = constraints_tp\n elif not isinstance(constraints, ConstraintList):\n raise RuntimeError(\"constraints should be built from an ConstraintOption or ConstraintList\")\n\n if not isinstance(parameters, ParameterList):\n raise RuntimeError(\"parameters should be built from an ParameterList\")\n\n if not isinstance(state_transitions, StateTransitionList):\n raise RuntimeError(\"state_transitions should be built from an StateTransitionList\")\n\n if not isinstance(ode_solver, OdeSolver):\n raise RuntimeError(\"ode_solver should be built an instance of OdeSolver\")\n\n if not isinstance(use_SX, bool):\n raise RuntimeError(\"use_SX should be a bool\")\n\n # Declare optimization variables\n self.J = []\n self.g = []\n self.g_bounds = []\n self.V = []\n self.V_bounds = Bounds(interpolation=InterpolationType.CONSTANT)\n self.V_init = InitialConditions(interpolation=InterpolationType.CONSTANT)\n self.param_to_optimize = {}\n\n # nlp is the core of a phase\n self.nlp = [{} for _ in range(self.nb_phases)]\n self.__add_to_nlp(\"model\", biorbd_model, False)\n self.__add_to_nlp(\"phase_idx\", [i for i in range(self.nb_phases)], False)\n\n # Type of CasADi graph\n if use_SX:\n self.CX = SX\n else:\n self.CX = MX\n\n # Define some aliases\n self.__add_to_nlp(\"ns\", number_shooting_points, False)\n for nlp in self.nlp:\n if nlp[\"ns\"] < 1:\n raise RuntimeError(\"Number of shooting points must be at least 1\")\n self.initial_phase_time = phase_time\n phase_time, initial_time_guess, time_min, time_max = self.__init_phase_time(\n phase_time, objective_functions, constraints\n )\n self.__add_to_nlp(\"tf\", phase_time, False)\n self.__add_to_nlp(\"t0\", [0] + [nlp[\"tf\"] for i, nlp in enumerate(self.nlp) if i != len(self.nlp) - 
1], False)\n self.__add_to_nlp(\"dt\", [self.nlp[i][\"tf\"] / max(self.nlp[i][\"ns\"], 1) for i in range(self.nb_phases)], False)\n self.nb_threads = nb_threads\n self.__add_to_nlp(\"nb_threads\", nb_threads, True)\n self.solver_type = Solver.NONE\n self.solver = None\n\n # External forces\n if external_forces != ():\n external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)\n self.__add_to_nlp(\"external_forces\", external_forces, False)\n\n # Compute problem size\n if all_generalized_mapping is not None:\n if q_mapping is not None or q_dot_mapping is not None or tau_mapping is not None:\n raise RuntimeError(\"all_generalized_mapping and a specified mapping cannot be used alongside\")\n q_mapping = q_dot_mapping = tau_mapping = all_generalized_mapping\n self.__add_to_nlp(\"q_mapping\", q_mapping, q_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"q_dot_mapping\", q_dot_mapping, q_dot_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"tau_mapping\", tau_mapping, tau_mapping is None, BidirectionalMapping)\n plot_mappings = plot_mappings if plot_mappings is not None else {}\n reshaped_plot_mappings = []\n for i in range(self.nb_phases):\n reshaped_plot_mappings.append({})\n for key in plot_mappings:\n reshaped_plot_mappings[i][key] = plot_mappings[key][i]\n self.__add_to_nlp(\"plot_mappings\", reshaped_plot_mappings, False)\n\n # Prepare the parameters to optimize\n self.state_transitions = []\n if len(parameters) > 0:\n self.update_parameters(parameters)\n\n # Declare the time to optimize\n self.__define_variable_time(initial_time_guess, time_min, time_max)\n\n # Prepare the dynamics of the program\n self.__add_to_nlp(\"dynamics_type\", dynamics_type, False)\n self.__add_to_nlp(\"ode_solver\", ode_solver, True)\n self.__add_to_nlp(\"control_type\", control_type, True)\n for i in range(self.nb_phases):\n self.__initialize_nlp(self.nlp[i])\n Problem.initialize(self, self.nlp[i])\n\n # Prepare path constraints\n self.__add_to_nlp(\"X_bounds\", X_bounds, False)\n self.__add_to_nlp(\"U_bounds\", U_bounds, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Prepare initial guesses\n self.__add_to_nlp(\"X_init\", X_init, False)\n self.__add_to_nlp(\"U_init\", U_init, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_init\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Variables and constraint for the optimization program\n for i in range(self.nb_phases):\n self.__define_multiple_shooting_nodes_per_phase(self.nlp[i], i)\n\n # Define dynamic problem\n self.__add_to_nlp(\n 
\"nb_integration_steps\", nb_integration_steps, True\n ) # Number of steps of integration (for now only RK4 steps are implemented)\n for i in range(self.nb_phases):\n if self.nlp[0][\"nx\"] != self.nlp[i][\"nx\"] or self.nlp[0][\"nu\"] != self.nlp[i][\"nu\"]:\n raise RuntimeError(\"Dynamics with different nx or nu is not supported yet\")\n self.__prepare_dynamics(self.nlp[i])\n\n # Prepare phase transitions (Reminder, it is important that parameters are declared\n # before, otherwise they will erase the state_transitions)\n self.state_transitions = StateTransitionFunctions.prepare_state_transitions(self, state_transitions)\n\n # Inner- and inter-phase continuity\n ContinuityFunctions.continuity(self)\n\n # Prepare constraints\n self.update_constraints(constraints)\n\n # Prepare objectives\n self.update_objectives(objective_functions)", "def __init__(self,N,Nup=None,m=None,S=\"1/2\",pauli=True,Ns_block_est=None,**blocks):\n\t\tself._S = S\n\t\tself._pauli = pauli\n\t\tsps,S = S_dict[S]\n\n\t\t_Np = blocks.get(\"_Np\")\n\t\tif _Np is not None:\n\t\t\tblocks.pop(\"_Np\")\n\n\t\tif Nup is not None and m is not None:\n\t\t\traise ValueError(\"Cannot use Nup and m at the same time\")\n\t\tif m is not None and Nup is None:\n\t\t\tif m < -S or m > S:\n\t\t\t\traise ValueError(\"m must be between -S and S\")\n\n\t\t\tNup = int((m+S)*N)\n\n\t\tif sps==2:\n\t\t\thcb_basis_general.__init__(self,N,Nb=Nup,Ns_block_est=Ns_block_est,_Np=_Np,**blocks)\n\t\telse:\n\t\t\thigher_spin_basis_general.__init__(self,N,Nup=Nup,sps=sps,Ns_block_est=Ns_block_est,_Np=_Np,**blocks)\n\n\n\t\tif self._sps <= 2:\n\t\t\tself._operators = (\"availible operators for spin_basis_1d:\"+\n\t\t\t\t\t\t\t\t\"\\n\\tI: identity \"+\n\t\t\t\t\t\t\t\t\"\\n\\t+: raising operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\t-: lowering operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\tx: x pauli/spin operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\ty: y pauli/spin operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\tz: z pauli/spin operator\")\n\n\t\t\tself._allowed_ops = set([\"I\",\"+\",\"-\",\"x\",\"y\",\"z\"])\n\t\telse:\n\t\t\tself._operators = (\"availible operators for spin_basis_1d:\"+\n\t\t\t\t\t\t\t\t\"\\n\\tI: identity \"+\n\t\t\t\t\t\t\t\t\"\\n\\t+: raising operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\t-: lowering operator\"+\n\t\t\t\t\t\t\t\t\"\\n\\tz: z pauli/spin operator\")\n\n\t\t\tself._allowed_ops = set([\"I\",\"+\",\"-\",\"z\"])", "def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('Hannibal_HPAdemo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n #~~~~~!!!\n #problem.quantity('terrain3','(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/m^2') #Integral conversion factor\n problem.constant('V',1,'m/s') #Vehicle speed\n problem.constant('elev',1,'m') #Initial Elevation\n\n #Unit scaling\n 
problem.scale.unit('m',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n #problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=8)\n problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=15, verbose = True, cached = False)\n\n #Initial Guess (Classic test example [4.9,0.4])\n problem.guess.setup('auto',start=[9.0,0.5], costate_guess=[0.0,-0.1]) #City A\n #problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A\n\n #Add Continuation Steps (Classic test example [7.2,8.5]) [8, 4.5]\n problem.steps.add_step(strategy='HPA',hweight=0.9) \\\n .terminal('x', 3.0, 10) \\\n .terminal('y', 9.5, 10) \\\n # .const('w', 0.9, 2, confined=True)\n\n #problem.steps.add_step(strategy='manual').num_cases(10) \\\n # .terminal('x', 3.0) \\\n # .terminal('y', 9.5) \\\n\n #problem.steps.add_step().num_cases(30) \\\n # .const('w',0.99) #Final Terrain weighting factor\n\n\n return problem", "def __init__(self,basis,sw_survey,lw_surveys, prior_params,needs_a=False,do_mit=True):\n print(\"MultiFisher: began initialization\")\n self.basis = basis\n self.sw_survey = sw_survey\n self.lw_surveys = lw_surveys\n self.prior_params = prior_params\n self.needs_a = needs_a\n self.do_mit = do_mit\n\n #prepare to project lw basis to sw basis\n self.n_sw = self.sw_survey.get_total_dimension()\n\n self.lw_F_no_mit = None\n self.lw_F_mit = None\n self.lw_to_sw_array = None\n\n print(\"MultiFisher: getting projection matrices\")\n self.lw_to_sw_array = self.get_lw_to_sw_array()\n self.sw_to_par_array = sw_survey.get_dO_I_dpar_array()\n\n\n\n #TODO eliminate from main loop\n if self.needs_a:\n print(\"MultiFisher: getting lw no mit variance\")\n self.a_vals = np.zeros(2,dtype=object)\n self.lw_F_no_mit = self.get_lw_fisher(f_spec_SSC_no_mit,initial_state=fm.REP_CHOL)\n self.project_lw_a = self.basis.get_ddelta_bar_ddelta_alpha(self.sw_survey.geo,tomography=True)\n self.a_vals[0] = self.lw_F_no_mit.project_covar(self.project_lw_a.T,destructive=True).get_covar()\n self.lw_F_no_mit = None\n else:\n self.a_vals = None\n self.project_lw_a = None\n\n\n #self.lw_F_no_mit = self.get_lw_fisher(f_spec_SSC_no_mit,initial_state=fm.REP_CHOL)\n\n print(\"MultiFisher: projecting lw no mit covariance\")\n #self.sw_f_ssc_no_mit = self.lw_F_no_mit.project_covar(self.get_lw_to_sw_array(),destructive=False)\n vs_perturb,sigma2s_perturb = self.lw_surveys[0].observables[0].get_perturbing_vector()\n sw_cov_ssc,sw_cov_ssc_mit = self.basis.perturb_and_project_covar(vs_perturb,self.get_lw_to_sw_array(),sigma2s_perturb)\n self.sw_f_ssc_no_mit = fm.FisherMatrix(sw_cov_ssc,fm.REP_COVAR,fm.REP_COVAR)\n self.sw_f_ssc_mit = fm.FisherMatrix(sw_cov_ssc_mit,fm.REP_COVAR,fm.REP_COVAR)\n sw_cov_ssc = None\n sw_cov_ssc_mit = None\n vs_perturb = None\n sigma2s_perturb=None\n #self.sw_f_ssc_mit2 = fm.FisherMatrix(self.basis.perturb_and_project_covar(vs_perturb,self.get_lw_to_sw_array(),sigma2s_perturb),fm.REP_COVAR,fm.REP_COVAR)\n #self.lw_F_no_mit = None\n\n if do_mit:\n print(\"MultiFisher: getting lw mit covariance\")\n #self.lw_F_mit = self.get_lw_fisher(f_spec_SSC_mit,initial_state=fm.REP_FISHER)\n #self.lw_F_mit = self.get_lw_fisher(f_spec_SSC_mit,initial_state=fm.REP_COVAR)\n\n if self.needs_a:\n print(\"MultiFisher: getting lw mit variance \")\n self.a_vals[1] = self.lw_F_mit.project_covar(self.project_lw_a.T).get_covar()\n\n print(\"MultiFisher: projecting lw mit covariance\")\n 
#self.sw_f_ssc_mit = self.lw_F_mit.project_covar(self.get_lw_to_sw_array(),destructive=False)\n #self.lw_F_mit = None\n else:\n self.sw_f_ssc_mit = None\n #accumulate lw covariances onto fisher_tot\n\n #for i in range(0,self.lw_surveys.size):\n # self.lw_surveys[i].fisher_accumulate(self.lw_F_mit)\n #self.lw_F_mit.switch_rep(fm.REP_CHOL_INV)\n #self.lw_F_no_mit.switch_rep(fm.REP_CHOL_INV)\n\n\n #self.lw_F_mit = None\n self.lw_to_sw_array = None\n\n #sw covariances to add\n print(\"MultiFisher: getting sw covariance matrices\")\n self.sw_non_SSC_covars = self.sw_survey.get_non_SSC_sw_covar_arrays()\n self.sw_g_covar = fm.FisherMatrix(self.sw_non_SSC_covars[0],fm.REP_COVAR,fm.REP_COVAR,silent=True)\n self.sw_ng_covar = fm.FisherMatrix(self.sw_non_SSC_covars[1],fm.REP_COVAR,fm.REP_COVAR,silent=True)\n\n if self.sw_survey.C.p_space=='jdem':\n self.fisher_prior_obj = prior_fisher.PriorFisher(self.sw_survey.C.de_model,self.prior_params)\n self.fisher_priors = self.fisher_prior_obj.get_fisher()\n else:\n warn('Current priors do not support p_space '+str(self.sw_survey.C.p_space)+', defaulting to 0 priors')\n self.fisher_prior_obj = None\n self.fisher_priors = fm.FisherMatrix(np.zeros((self.sw_to_par_array.shape[1],self.sw_to_par_array.shape[1])),fm.REP_FISHER,fm.REP_FISHER,silent=True)\n\n print(\"MultiFisher: finished initialization\")", "def __init__(self, projects_in, project_vars_in, total_cost_in,\n total_value_in):\n cp_model.CpSolverSolutionCallback.__init__(self)\n self.p_ = projects_in\n self.p_vars_ = project_vars_in\n self.total_cost_ = total_cost_in\n self.total_value_ = total_value_in\n self.total_plans = 0", "def _check_init_solution(self):\r\n B = np.array([self._A[:, j] for j in self._basic_vars]).transpose()\r\n self._B_inv = np.linalg.inv(B)\r\n x_B = self._B_inv @ self._b\r\n for x in x_B:\r\n if x < 0:\r\n raise AssertionError(\"Initial solution is not feasible!\")", "def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP 
gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file", "def setUp(self):\n domain_fname = '../domains/baxter_domain/baxter.domain'\n d_c = main.parse_file_to_dict(domain_fname)\n domain = parse_domain_config.ParseDomainConfig.parse(d_c)\n hls = hl_solver.FFSolver(d_c)\n def get_plan(p_fname, plan_str=None):\n p_c = main.parse_file_to_dict(p_fname)\n problem = parse_problem_config.ParseProblemConfig.parse(p_c, domain)\n abs_problem = hls.translate_problem(problem)\n if plan_str is not None:\n return hls.get_plan(plan_str, domain, problem)\n return hls.solve(abs_problem, domain, problem)\n self.get_plan = get_plan\n\n # Successful Problem\n # self.move_arm_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_move_arm.prob')\n # self.grab_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])\n # self.move_hold_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_move_holding.prob', ['0: MOVETOHOLDING BAXTER ROBOT_INIT_POSE ROBOT_END_POSE CAN0'])\n # self.complex_grab_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_complex_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])\n\n # Problem for testing\n # self.putdown_prob = get_plan('../domains/baxter_domain/baxter_probs/putdown_1234_0.prob', ['0: PUTDOWN BAXTER CAN0 TARGET2 ROBOT_INIT_POSE EE_TARGET2 ROBOT_END_POSE'])\n\n # Problem for test_free_attrs test\n # self.test_free_attrs_prob = get_plan('../domains/baxter_domain/baxter_probs/baxter_complex_grasp.prob', ['0: GRASP BAXTER CAN0 TARGET0 PDP_TARGET0 EE_TARGET0 ROBOT_END_POSE'])", "def office_prepare_solver(parser, args, params):\n parser.parse_known_args(args)\n control.prepare_solver(params)", "def __init__(self, benchmarks=None, rho=None, lamb=None, bias=None, **kwargs):\n\t\tTask.__init__(self, **kwargs)", "def run(self, dag):\n if self._target_basis is None and self._target is None:\n return dag\n\n qarg_indices = {qubit: index for index, qubit in enumerate(dag.qubits)}\n\n # Names of instructions assumed to supported by any backend.\n if self._target is None:\n basic_instrs = [\"measure\", \"reset\", \"barrier\", \"snapshot\", \"delay\"]\n target_basis = set(self._target_basis)\n source_basis = set(self._extract_basis(dag))\n qargs_local_source_basis = {}\n else:\n basic_instrs = [\"barrier\", \"snapshot\"]\n target_basis = self._target.keys() - set(self._non_global_operations)\n source_basis, qargs_local_source_basis = self._extract_basis_target(dag, qarg_indices)\n\n target_basis = set(target_basis).union(basic_instrs)\n\n logger.info(\n \"Begin BasisTranslator from source basis %s to target basis %s.\",\n source_basis,\n target_basis,\n )\n\n # Search for a path from source to target basis.\n search_start_time = time.time()\n basis_transforms = _basis_search(self._equiv_lib, source_basis, target_basis)\n\n qarg_local_basis_transforms = {}\n for qarg, local_source_basis in qargs_local_source_basis.items():\n expanded_target = set(target_basis)\n # For any multiqubit operation that contains a subset of qubits that\n # has a non-local operation, 
include that non-local operation in the\n # search. This matches with the check we did above to include those\n # subset non-local operations in the check here.\n if len(qarg) > 1:\n for non_local_qarg, local_basis in self._qargs_with_non_global_operation.items():\n if qarg.issuperset(non_local_qarg):\n expanded_target |= local_basis\n else:\n expanded_target |= self._qargs_with_non_global_operation[tuple(qarg)]\n\n logger.info(\n \"Performing BasisTranslator search from source basis %s to target \"\n \"basis %s on qarg %s.\",\n local_source_basis,\n expanded_target,\n qarg,\n )\n local_basis_transforms = _basis_search(\n self._equiv_lib, local_source_basis, expanded_target\n )\n\n if local_basis_transforms is None:\n raise TranspilerError(\n \"Unable to translate the operations in the circuit: \"\n f\"{[x[0] for x in local_source_basis]} to the backend's (or manually \"\n f\"specified) target basis: {list(expanded_target)}. This likely means the \"\n \"target basis is not universal or there are additional equivalence rules \"\n \"needed in the EquivalenceLibrary being used. For more details on this \"\n \"error see: \"\n \"https://qiskit.org/documentation/stubs/qiskit.transpiler.passes.\"\n \"BasisTranslator.html#translation_errors\"\n )\n\n qarg_local_basis_transforms[qarg] = local_basis_transforms\n\n search_end_time = time.time()\n logger.info(\n \"Basis translation path search completed in %.3fs.\", search_end_time - search_start_time\n )\n\n if basis_transforms is None:\n raise TranspilerError(\n \"Unable to translate the operations in the circuit: \"\n f\"{[x[0] for x in source_basis]} to the backend's (or manually specified) target \"\n f\"basis: {list(target_basis)}. This likely means the target basis is not universal \"\n \"or there are additional equivalence rules needed in the EquivalenceLibrary being \"\n \"used. 
For more details on this error see: \"\n \"https://qiskit.org/documentation/stubs/qiskit.transpiler.passes.BasisTranslator.\"\n \"html#translation_errors\"\n )\n\n # Compose found path into a set of instruction substitution rules.\n\n compose_start_time = time.time()\n instr_map = _compose_transforms(basis_transforms, source_basis, dag)\n extra_instr_map = {\n qarg: _compose_transforms(transforms, qargs_local_source_basis[qarg], dag)\n for qarg, transforms in qarg_local_basis_transforms.items()\n }\n\n compose_end_time = time.time()\n logger.info(\n \"Basis translation paths composed in %.3fs.\", compose_end_time - compose_start_time\n )\n\n # Replace source instructions with target translations.\n\n replace_start_time = time.time()\n\n def apply_translation(dag, wire_map):\n dag_updated = False\n for node in dag.op_nodes():\n node_qargs = tuple(wire_map[bit] for bit in node.qargs)\n qubit_set = frozenset(node_qargs)\n if node.name in target_basis:\n if isinstance(node.op, ControlFlowOp):\n flow_blocks = []\n for block in node.op.blocks:\n dag_block = circuit_to_dag(block)\n dag_updated = apply_translation(\n dag_block,\n {\n inner: wire_map[outer]\n for inner, outer in zip(block.qubits, node.qargs)\n },\n )\n if dag_updated:\n flow_circ_block = dag_to_circuit(dag_block)\n else:\n flow_circ_block = block\n flow_blocks.append(flow_circ_block)\n node.op = node.op.replace_blocks(flow_blocks)\n continue\n if (\n node_qargs in self._qargs_with_non_global_operation\n and node.name in self._qargs_with_non_global_operation[node_qargs]\n ):\n continue\n\n if dag.has_calibration_for(node):\n continue\n if qubit_set in extra_instr_map:\n self._replace_node(dag, node, extra_instr_map[qubit_set])\n elif (node.op.name, node.op.num_qubits) in instr_map:\n self._replace_node(dag, node, instr_map)\n else:\n raise TranspilerError(f\"BasisTranslator did not map {node.name}.\")\n dag_updated = True\n return dag_updated\n\n apply_translation(dag, qarg_indices)\n replace_end_time = time.time()\n logger.info(\n \"Basis translation instructions replaced in %.3fs.\",\n replace_end_time - replace_start_time,\n )\n\n return dag", "def construct_basis_tensors(self):\n\t\n\tu = np.array([self.cth*self.cphi, self.cth*self.sphi, -self.sth])\n\tv = np.array([self.sphi, -self.cphi, 0.0])\n\n\tep = np.outer(u,u) - np.outer(v,v)\n\tec = np.outer(u,v) + np.outer(v,u)\n\t\n\tself.ep = self.c2psi*ep - self.s2psi*ec\n\tself.ec = self.s2psi*ep + self.c2psi*ec\n\t\t\n\treturn", "def init_process(mech):\n gases[mech] = ct.Solution(mech)\n gases[mech].transport_model = 'Multi'", "def __init__(self, ea_optimizer, is_chief, task_index):\n self._ea_optimizer = ea_optimizer\n self._is_chief = is_chief\n self._task_index = task_index", "def init(self):\n self.t_f = 0\n self.h_b = [] # Heap of running tasks.\n self.h_c = [] # Heap of waiting tasks.\n self.h_d = [] # Heap of deadlines.\n self.pmin = min([task.period for task in self.task_list]) \\\n * self.sim.cycles_per_ms\n self.evt_bc = False\n self.activations = []\n self.waiting_schedule = False", "def _initialize(self):\n self._solution = self._problem_instance.build_solution(method=\"Greedy\")\n\n while not self._problem_instance.is_admissible(self._solution):\n self._solution = self._problem_instance.build_solution(method=\"Greedy\")\n \n self._problem_instance.evaluate_solution(self._solution, feedback=self._feedback)", "def _issue_first_task(self):\n\n task_dict = {\n \"task\": \"prepare_iteration\",\n \"model\": str(self.model_path),\n \"iteration_number\": 
self.iteration_number,\n \"iteration_name\": f\"model_{self.iteration_number:05d}\",\n \"finished\": False,\n }\n\n with open(self.task_path, \"w+\") as fh:\n toml.dump(task_dict, fh)", "def allocate(self):\n\n # a. grid\n self.create_grids()\n\n # b. solution\n self.solve_prep()\n\n # c. simulation\n self.sim.euler = np.full((self.par.T-1,self.par.eulerK,self.par.eulerK),np.nan)", "def set_up_ad(self):\n\n # Computation of the constraints mapping function\n self.mapping_function = self.compute_mapping_function()\n\n # Creation of a dictionnary containing all the stuff\n # needed to compute the jacobian\n self.jac_dict = {}\n\n # Computation of the constraints jacobian\n # sparsity pattern\n self.jac_dict['jac_sp_patt'], self.jac_dict['jac_data'], \\\n self.jac_dict['work'] = self.compute_jacobian_sparsity_patt()" ]
[ "0.75565845", "0.5916598", "0.5722016", "0.56191593", "0.55989605", "0.5507775", "0.5439479", "0.54063594", "0.53515726", "0.5298564", "0.5275407", "0.5211078", "0.5209353", "0.5185748", "0.5182779", "0.51719403", "0.5151672", "0.51347935", "0.5133514", "0.5122238", "0.5107102", "0.51056856", "0.50963116", "0.5063724", "0.5053108", "0.5043319", "0.50396025", "0.5037156", "0.50324774", "0.50204146" ]
0.72589576
1
Solve a linear equation system involving a basis matrix. solvewithbasis(self,transp_,numnz_,sub_,val_)
def solvewithbasis(self,transp_,numnz_,sub_,val_): _numnz_tmp = ctypes.c_int32(numnz_) _sub_minlength = self.getnumcon() if self.getnumcon() > 0 and sub_ is not None and len(sub_) != self.getnumcon(): raise ValueError("Array argument sub is not long enough: Is %d, expected %d" % (len(sub_),self.getnumcon())) if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable: raise ValueError("Argument sub must be writable") if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous: _sub_copyarray = False _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif sub_ is not None: _sub_copyarray = True _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32)) _sub_np_tmp[:] = sub_ assert _sub_np_tmp.flags.contiguous _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _sub_copyarray = False _sub_tmp = None _val_minlength = self.getnumcon() if self.getnumcon() > 0 and val_ is not None and len(val_) != self.getnumcon(): raise ValueError("Array argument val is not long enough: Is %d, expected %d" % (len(val_),self.getnumcon())) if isinstance(val_,numpy.ndarray) and not val_.flags.writeable: raise ValueError("Argument val must be writable") if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous: _val_copyarray = False _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif val_ is not None: _val_copyarray = True _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64)) _val_np_tmp[:] = val_ assert _val_np_tmp.flags.contiguous _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _val_copyarray = False _val_tmp = None res = __library__.MSK_XX_solvewithbasis(self.__nativep,transp_,ctypes.byref(_numnz_tmp),_sub_tmp,_val_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _numnz_return_value = _numnz_tmp.value if _sub_copyarray: sub_[:] = _sub_np_tmp if _val_copyarray: val_[:] = _val_np_tmp return (_numnz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solvewithbasis(self,transp_,numnz_,sub,val): # 3\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != self.getnumcon():\n raise ValueError(\"Array argument sub has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != self.getnumcon():\n raise ValueError(\"Array argument val has wrong length\")\n res,resargs = self.__obj.solvewithbasis(transp_,numnz_,sub_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_sub:\n sub[:] = _tmparr_sub\n return _numnz_return_value", "def new_basis(abc, lattice):\n return np.dot(abc.T, lattice.inv_matrix.T)", "def initbasissolve(self,basis): # 3\n _copyback_basis = False\n if basis is None:\n basis_ = None\n else:\n try:\n basis_ = memoryview(basis)\n except TypeError:\n try:\n _tmparr_basis = array.array(\"i\",basis)\n except TypeError:\n raise TypeError(\"Argument basis has wrong type\")\n else:\n basis_ = memoryview(_tmparr_basis)\n _copyback_basis = True\n else:\n if basis_.format != \"i\":\n basis_ = memoryview(array.array(\"i\",basis))\n _copyback_basis = True\n if basis_ is not None and len(basis_) != self.getnumcon():\n raise ValueError(\"Array argument basis has wrong length\")\n res = self.__obj.initbasissolve(basis_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_basis:\n basis[:] = _tmparr_basis", "def eval_basis(self, maps):\n if self.eval_method == ['grad']:\n val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))\n return val\n\n elif self.eval_method == ['val']:\n return self.bfref\n\n elif self.eval_method == ['div']:\n val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))\n val = nm.atleast_3d(nm.einsum('ijkk', val))\n return val\n\n elif self.eval_method == ['grad', 'sym', 'Man']:\n val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))\n from sfepy.terms.terms_general import proceed_methods\n val = proceed_methods(val, self.eval_method[1:])\n return val\n\n else:\n msg = \"Improper method '%s' for evaluation of basis functions\" \\\n % (self.eval_method)\n raise NotImplementedError(msg)", "def initbasissolve(self,basis_):\n _basis_minlength = self.getnumcon()\n if self.getnumcon() > 0 and basis_ is not None and len(basis_) != self.getnumcon():\n raise ValueError(\"Array argument basis is not long enough: Is %d, expected %d\" % (len(basis_),self.getnumcon()))\n if isinstance(basis_,numpy.ndarray) and not basis_.flags.writeable:\n raise ValueError(\"Argument basis must be writable\")\n if isinstance(basis_, numpy.ndarray) and basis_.dtype is numpy.dtype(numpy.int32) and basis_.flags.contiguous:\n _basis_copyarray = False\n _basis_tmp = 
ctypes.cast(basis_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif basis_ is not None:\n _basis_copyarray = True\n _basis_np_tmp = numpy.zeros(len(basis_),numpy.dtype(numpy.int32))\n _basis_np_tmp[:] = basis_\n assert _basis_np_tmp.flags.contiguous\n _basis_tmp = ctypes.cast(_basis_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _basis_copyarray = False\n _basis_tmp = None\n \n res = __library__.MSK_XX_initbasissolve(self.__nativep,_basis_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _basis_copyarray:\n basis_[:] = _basis_np_tmp", "def calc_j(basis):\n b1 = basis[0]\n b2 = basis[1]\n T = b1.shape[1]\n integrand11 = zeros(T)\n integrand12 = zeros(T)\n integrand22 = zeros(T)\n\n for i in range(0, T):\n a = b1[:, i].T\n b = b2[:, i].T\n integrand11[i] = a.dot(b1[:, i])\n integrand12[i] = a.dot(b2[:, i])\n integrand22[i] = b.dot(b2[:, i])\n\n j = zeros((2, 2))\n j[0, 0] = trapz(integrand11, linspace(0, 1, T))\n j[0, 1] = trapz(integrand12, linspace(0, 1, T))\n j[1, 1] = trapz(integrand22, linspace(0, 1, T))\n j[1, 0] = j[0, 1]\n\n return (j)", "def affine_subbasis(mode, dim=3, sub=None, dtype='float64'):\n\n # Authors\n # -------\n # .. John Ashburner <[email protected]> : original Matlab code\n # .. Yael Balbastre <[email protected]> : Python code\n\n # Check if sub passed in mode\n mode = mode.split('[')\n if len(mode) > 1:\n if sub is not None:\n raise ValueError('Cannot use both ``mode`` and ``sub`` '\n 'to specify a sub-basis.')\n sub = '[' + mode[1]\n sub = literal_eval(sub) # Safe eval for list of native types\n mode = mode[0]\n\n dim = unik.reshape(dim, ())\n if not unik.is_tensor(dim, 'tf') and dim not in (1, 2, 3):\n raise ValueError('dim must be one of 1, 2, 3')\n if mode not in affine_subbasis_choices:\n raise ValueError('mode must be one of {}.'\n .format(affine_subbasis_choices))\n\n # Compute the basis\n\n if mode == 'T':\n basis = unik.zeros((dim, dim+1, dim+1), dtype=dtype)\n def body(basis, i):\n return unik.scatter([[i, i, dim]], [1],\n basis, mode='update'), i+1\n def cond(_, i): return i < dim\n basis = unik.while_loop(cond, body, [basis, 0])[0]\n\n elif mode == 'Z':\n basis = unik.zeros((dim, dim+1, dim+1), dtype=dtype)\n def body(basis, i):\n return unik.scatter([[i, i, i]], [1],\n basis, mode='update'), i+1\n def cond(_, i): return i < dim\n basis = unik.while_loop(cond, body, [basis, 0])[0]\n\n elif mode == 'Z0':\n basis = unik.zeros((dim-1, dim+1), dtype=dtype)\n def body(basis, i):\n return unik.scatter([[i, i], [i, i+1]], [1, -1],\n basis, mode='update'), i+1\n def cond(_, i): return i < dim-1\n basis = unik.while_loop(cond, body, [basis, 0])[0]\n # Orthogonalise numerically (is there an analytical form?)\n u, s, v = unik.svd(basis)\n basis = unik.mm(unik.transpose(u), basis)\n basis = unik.mm(basis, v)\n basis = unik.lmdiv(unik.diag(s), basis)\n basis = unik.map_fn(unik.diag, basis)\n\n elif mode == 'I':\n basis = unik.zeros((1, dim+1, dim+1), dtype=dtype)\n def body(basis, i):\n return unik.scatter([[0, i, i]], [1], basis, mode='update'), i+1\n def cond(_, i): return i < dim\n basis = unik.while_loop(cond, body, [basis, 0])[0]\n\n elif mode == 'R':\n basis = unik.zeros((dim*(dim-1)//2, dim+1, dim+1), dtype=dtype)\n def body(basis, i, j, k):\n ind = [[k, i, j], [k, j, i]]\n val = [1/np.sqrt(2), -1/np.sqrt(2)]\n basis = unik.scatter(ind, val, basis, mode='update')\n j = unik.cond(j+1 < dim, lambda: j+1, lambda: 0)\n i = unik.cond(j == 0, lambda: i+1, lambda: i)\n j = unik.cond(j == 0, 
lambda: i+1, lambda: j)\n k = k + 1\n return basis, i, j, k\n def cond(_, i, j, k): return (i < dim) & (j < dim)\n basis = unik.while_loop(cond, body, [basis, 0, 1, 0])[0]\n\n elif mode == 'S':\n basis = unik.zeros((dim*(dim-1)//2, dim+1, dim+1), dtype=dtype)\n def body(basis, i, j, k):\n ind = [[k, i, j], [k, j, i]]\n val = [1/np.sqrt(2), 1/np.sqrt(2)]\n basis = unik.scatter(ind, val, basis, mode='update')\n j = unik.cond(j+1 < dim, lambda: j+1, lambda: 0)\n i = unik.cond(j == 0, lambda: i+1, lambda: i)\n j = unik.cond(j == 0, lambda: i+1, lambda: j)\n k = k + 1\n return basis, i, j, k\n def cond(_, i, j, k): return (i < dim) & (j < dim)\n basis = unik.while_loop(cond, body, [basis, 0, 1, 0])[0]\n\n else:\n # We should never reach this (a test was performed earlier)\n raise ValueError\n\n # Select subcomponents of the basis\n if sub is not None:\n try:\n sub = list(sub)\n except TypeError:\n sub = [sub]\n basis = unik.stack([basis[i, ...] for i in sub])\n\n return unik.cast(basis, dtype)", "def design_matrix(x, basis, degree=0):\n # TO DO:: Compute desing matrix for each of the basis functions\n if basis == 'polynomial':\n result=None\n for i in range(1,degree+1):\n newMatrix=np.power(x,i)\n if result is None:\n result=newMatrix\n else:\n result=np.hstack((result,newMatrix))\n #initialize a column of ones to concat to final result\n res_rows=result.shape[0]\n ones_col=np.ones((res_rows,1))\n phi=np.hstack((ones_col,result))\n #phi=result[...,2:]\n elif basis == 'ReLU':\n result=None\n newMatrix=np.negative(x)\n newMatrix=np.add(newMatrix,5000)\n\n reLUtrix=np.maximum(newMatrix,0,newMatrix)\n if result is None:\n result=reLUtrix\n else:\n result=np.hstack((result,reLUtrix))\n res_rows=result.shape[0]\n ones_col=np.ones((res_rows,1))\n phi = np.hstack((ones_col,result))\n # Debug statement feel free to comment out\n #print(\"Value of phi\",phi)\n else:\n assert(False), 'Unknown basis %s' % basis\n\n return phi", "def set_basis(self, maps, methods):\n self.eval_method = methods\n\n def get_grad(maps, shape):\n bfref0 = eval_base(maps.qp_coor, diff=True).swapaxes(1, 2)\n if shape == (1,): # scalar variable\n bfref = bfref0\n elif len(shape) == 1: # vector variable\n vec_shape = nm.array(bfref0.shape + shape)\n vec_shape[1] *= shape[0]\n bfref = nm.zeros(vec_shape)\n for ii in nm.arange(shape[0]):\n slc = slice(ii*bfref0.shape[1], (ii+1)*bfref0.shape[1])\n bfref[:, slc, ii] = bfref0\n else: # higher-order tensors variable\n msg = \"Evaluation of basis has not been implemented \\\n for higher-order tensors yet.\"\n raise NotImplementedError(msg)\n return bfref\n\n def get_val(maps, shape):\n bfref0 = eval_base(maps.qp_coor, diff=False).swapaxes(1, 2)\n\n if self.shape == (1,): # scalar variable\n bfref = bfref0\n elif len(shape) == 1:\n vec_shape = nm.array(bfref0.shape)\n vec_shape[1:3] *= shape[0]\n bfref = nm.zeros(vec_shape)\n for ii in nm.arange(shape[0]):\n slc = slice(ii*bfref0.shape[1], (ii+1)*bfref0.shape[1])\n bfref[:, slc] = bfref0\n else: # higher-order tensors variable\n msg = \"Evaluation of basis has not been implemented \\\n for higher-order tensors yet.\"\n raise NotImplementedError(msg)\n return bfref\n\n eval_base = self.interp.poly_spaces['v'].eval_base\n if self.eval_method[0] == 'val':\n bfref = get_val(maps, self.shape)\n\n elif self.eval_method[0] == 'grad':\n bfref = get_grad(maps, self.shape)\n\n elif self.eval_method[0] == 'div':\n bfref = get_grad(maps, self.shape)\n\n else:\n raise NotImplementedError(\"The method '%s' is not implemented\" \\\n % 
(self.eval_method))\n\n self.bfref = bfref\n self.n_basis = self.bfref.shape[1]", "def _solve_subproblem(\n self, x_eval: Tensor, x_neg: Tensor, X_pos: Tensor,\n cache_book: dict\n ) -> Tensor:\n\n # Q = A @ A.t()\n A, b, Q = self._qp_params(\n x_eval, x_neg, X_pos, cache_book\n )\n lamda = self._qp_solver(Q, b)\n\n return -A.t() @ lamda", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def sub_backsolve(self, b, transp='N'):\n \n # Case where b, and xsol are 1-D arrays\n if b.ndim==1:\n \n print \"Running sub_backsolve routine b.ndim=1.\"\n \n # b must have m elements or this doesn't make sense\n if len(b)!=self.m:\n raise ValueError(\"Length of b does not equal m in sub_backsolve b.ndim==1.\")\n #assert len(b)==self.m\n \n # Remove the known part from b\n bpart = b - self.r\n \n # Get the unknown part of b\n bsub = bpart[self.unknown_inds]\n \n # compute the unknown displacements\n xsub = self.Asub_factorized(bsub.astype(numpy.float64), trans=transp)\n #xsub = self.Asub_factorized.backsolve(bsub.astype(numpy.float64), trans=transp)\n \n # reconstruct the full solution vector\n x = numpy.zeros_like(b);\n x[self.unknown_inds] = xsub;\n x[self.xinds] = self.xsol;\n\n # Case where b is an m x p matrix, and xsol is an n x p matrix\n elif b.ndim==2:\n \n print \"Running sub_backsolve routine b.ndim=2.\"\n \n b_m, b_p = b.shape\n \n if b_m != self.m:\n raise ValueError('b_m not equal to self.m')\n if b_p != self.xsol.shape[1]:\n raise ValueError('b_p not equal to self.xsol.shape[1]')\n\n x = numpy.zeros((b_m, b_p))\n \n bpart = b - self.r\n bsub = bpart[self.unknown_inds,:]\n\n for k in range(b_p):\n xsub = self.Asub_factorized(bsub[:,k].astype(numpy.float64), trans=transp)\n #xsub = self.Asub_factorized.backsolve(bsub[:,k].astype(numpy.float64), trans=transp)\n x[self.unknown_inds,k] = xsub;\n x[self.xinds,k] = self.xsol[:,k]\n \n print \"Done 
with sub_backsolve.\"\n\n return x", "def jacobian(self, b):\n \n # Substitute parameters in partial derivatives\n subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]\n # Evaluate substituted partial derivatives for all x-values\n vals = [sp.lambdify(self._x, sub, \"numpy\")(self.xvals) for sub in subs]\n # Arrange values in column-major order\n return np.column_stack(vals)", "def construct_basis_tensors(self):\n\t\n\tu = np.array([self.cth*self.cphi, self.cth*self.sphi, -self.sth])\n\tv = np.array([self.sphi, -self.cphi, 0.0])\n\n\tep = np.outer(u,u) - np.outer(v,v)\n\tec = np.outer(u,v) + np.outer(v,u)\n\t\n\tself.ep = self.c2psi*ep - self.s2psi*ec\n\tself.ec = self.s2psi*ep + self.c2psi*ec\n\t\t\n\treturn", "def quadraticBasisUpdate(basis, Res, ahat, lowestActivation, HessianDiag, stepSize, sigma, constraint = 'L2', Noneg = False):\n dBasis = stepSize/sigma**2*torch.mm(Res, ahat.t())/ahat.size(1)\n dBasis = dBasis.div_(HessianDiag+lowestActivation)\n basis = basis.add_(dBasis)\n if Noneg:\n basis = basis.clamp(min = 0.)\n if constraint == 'L2':\n basis = basis.div_(basis.norm(2,0))\n return basis", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if 
j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def _compute_terms_to_make_leading_submatrix_singular(hessian_info, k):\n hessian_plus_lambda = hessian_info.hessian_plus_lambda\n upper_triangular = hessian_info.upper_triangular\n n = len(hessian_plus_lambda)\n\n delta = (\n np.sum(upper_triangular[: k - 1, k - 1] ** 2)\n - hessian_plus_lambda[k - 1, k - 1]\n )\n\n v = np.zeros(n)\n v[k - 1] = 1\n\n if k != 1:\n v[: k - 1] = solve_triangular(\n upper_triangular[: k - 1, : k - 1], -upper_triangular[: k - 1, k - 1]\n )\n\n return delta, v", "def _old_linearize(self):\n\n if (self._fr is None) or (self._frstar is None):\n raise ValueError('Need to compute Fr, Fr* first.')\n\n # Note that this is now unneccessary, and it should never be\n # encountered; I still think it should be in here in case the user\n # manually sets these matrices incorrectly.\n for i in self.q:\n if self._k_kqdot.diff(i) != 0 * self._k_kqdot:\n raise ValueError('Matrix K_kqdot must not depend on any q.')\n\n t = dynamicsymbols._t\n uaux = self._uaux\n uauxdot = [diff(i, t) for i in uaux]\n # dictionary of auxiliary speeds & derivatives which are equal to zero\n subdict = dict(zip(uaux[:] + uauxdot[:],\n [0] * (len(uaux) + len(uauxdot))))\n\n # Checking for dynamic symbols outside the dynamic differential\n # equations; throws error if there is.\n insyms = set(self.q[:] + self._qdot[:] + self.u[:] + self._udot[:] +\n uaux[:] + uauxdot)\n if any(find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,\n self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):\n raise ValueError('Cannot have dynamicsymbols outside dynamic \\\n forcing vector.')\n other_dyns = list(find_dynamicsymbols(msubs(self._f_d, subdict), 
insyms))\n\n # make it canonically ordered so the jacobian is canonical\n other_dyns.sort(key=default_sort_key)\n\n for i in other_dyns:\n if diff(i, dynamicsymbols._t) in other_dyns:\n raise ValueError('Cannot have derivatives of specified '\n 'quantities when linearizing forcing terms.')\n\n o = len(self.u) # number of speeds\n n = len(self.q) # number of coordinates\n l = len(self._qdep) # number of configuration constraints\n m = len(self._udep) # number of motion constraints\n qi = Matrix(self.q[: n - l]) # independent coords\n qd = Matrix(self.q[n - l: n]) # dependent coords; could be empty\n ui = Matrix(self.u[: o - m]) # independent speeds\n ud = Matrix(self.u[o - m: o]) # dependent speeds; could be empty\n qdot = Matrix(self._qdot) # time derivatives of coordinates\n\n # with equations in the form MM udot = forcing, expand that to:\n # MM_full [q,u].T = forcing_full. This combines coordinates and\n # speeds together for the linearization, which is necessary for the\n # linearization process, due to dependent coordinates. f1 is the rows\n # from the kinematic differential equations, f2 is the rows from the\n # dynamic differential equations (and differentiated non-holonomic\n # constraints).\n f1 = self._k_ku * Matrix(self.u) + self._f_k\n f2 = self._f_d\n # Only want to do this if these matrices have been filled in, which\n # occurs when there are dependent speeds\n if m != 0:\n f2 = self._f_d.col_join(self._f_dnh)\n fnh = self._f_nh + self._k_nh * Matrix(self.u)\n f1 = msubs(f1, subdict)\n f2 = msubs(f2, subdict)\n fh = msubs(self._f_h, subdict)\n fku = msubs(self._k_ku * Matrix(self.u), subdict)\n fkf = msubs(self._f_k, subdict)\n\n # In the code below, we are applying the chain rule by hand on these\n # things. All the matrices have been changed into vectors (by\n # multiplying the dynamic symbols which it is paired with), so we can\n # take the jacobian of them. The basic operation is take the jacobian\n # of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of\n # q, u, and t; f2 is a function of q, qdot, u, and t. In the code\n # below, we are not considering perturbations in t. So if f1 is a\n # function of the q's, u's but some of the q's or u's could be\n # dependent on other q's or u's (qd's might be dependent on qi's, ud's\n # might be dependent on ui's or qi's), so what we do is take the\n # jacobian of the f1 term wrt qi's and qd's, the jacobian wrt the qd's\n # gets multiplied by the jacobian of qd wrt qi, this is extended for\n # the ud's as well. dqd_dqi is computed by taking a taylor expansion of\n # the holonomic constraint equations about q*, treating q* - q as dq,\n # separating into dqd (depedent q's) and dqi (independent q's) and the\n # rearranging for dqd/dqi. 
This is again extended for the speeds.\n\n # First case: configuration and motion constraints\n if (l != 0) and (m != 0):\n fh_jac_qi = fh.jacobian(qi)\n fh_jac_qd = fh.jacobian(qd)\n fnh_jac_qi = fnh.jacobian(qi)\n fnh_jac_qd = fnh.jacobian(qd)\n fnh_jac_ui = fnh.jacobian(ui)\n fnh_jac_ud = fnh.jacobian(ud)\n fku_jac_qi = fku.jacobian(qi)\n fku_jac_qd = fku.jacobian(qd)\n fku_jac_ui = fku.jacobian(ui)\n fku_jac_ud = fku.jacobian(ud)\n fkf_jac_qi = fkf.jacobian(qi)\n fkf_jac_qd = fkf.jacobian(qd)\n f1_jac_qi = f1.jacobian(qi)\n f1_jac_qd = f1.jacobian(qd)\n f1_jac_ui = f1.jacobian(ui)\n f1_jac_ud = f1.jacobian(ud)\n f2_jac_qi = f2.jacobian(qi)\n f2_jac_qd = f2.jacobian(qd)\n f2_jac_ui = f2.jacobian(ui)\n f2_jac_ud = f2.jacobian(ud)\n f2_jac_qdot = f2.jacobian(qdot)\n\n dqd_dqi = - fh_jac_qd.LUsolve(fh_jac_qi)\n dud_dqi = fnh_jac_ud.LUsolve(fnh_jac_qd * dqd_dqi - fnh_jac_qi)\n dud_dui = - fnh_jac_ud.LUsolve(fnh_jac_ui)\n dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +\n fku_jac_ud * dud_dui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +\n (fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)\n f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi\n f1_u = f1_jac_ui + f1_jac_ud * dud_dui\n f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +\n f2_jac_ud * dud_dqi)\n f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui\n # Second case: configuration constraints only\n elif l != 0:\n dqd_dqi = - fh.jacobian(qd).LUsolve(fh.jacobian(qi))\n dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *\n dqd_dqi)\n f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)\n f1_u = f1.jacobian(ui)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +\n f2.jac_qdot * dqdot_dqi)\n f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui\n # Third case: motion constraints only\n elif m != 0:\n dud_dqi = fnh.jacobian(ud).LUsolve(- fnh.jacobian(qi))\n dud_dui = - fnh.jacobian(ud).LUsolve(fnh.jacobian(ui))\n dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +\n fku.jacobian(ud) * dud_dui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)\n f1_jac_ud = f1.jacobian(ud)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_jac_ud = f2.jacobian(ud)\n f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi\n f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui\n f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud\n * dud_dqi)\n f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *\n dqdot_dui)\n # Fourth case: No constraints\n else:\n dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi))\n f1_q = f1.jacobian(qi)\n f1_u = f1.jacobian(ui)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi\n f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui\n f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))\n if other_dyns:\n f1_oths = f1.jacobian(other_dyns)\n f2_oths = f2.jacobian(other_dyns)\n f_lin_B = -f1_oths.col_join(f2_oths)\n else:\n f_lin_B = Matrix()\n return (f_lin_A, f_lin_B, Matrix(other_dyns))", "def solve(self, sparse_args=None):\n Hmat = self.Hamiltonian()\n if sparse_args is not None: self.sparse_args = sparse_args\n if self.sparse_args is None:\n en, ev = eig(Hmat.todense())\n else:\n en, ev = eigsh(Hmat, **self.sparse_args)\n ev = 
transpose(array(ev))[argsort(en)]\n en = sort(en)\n self.en = en\n self.ev = ev\n self.solved = True\n return self.en, self.ev", "def design_matrix(x,basis=None,degree=1,bias=True,mu=None,s=1):\n \n if basis == 'polynomial':\n if(degree==1): \n if bias == True: \n x=np.append(np.ones((len(x),1)).astype(int),values=x,axis=1) \n phi=x\n else:\n pass \n else:\n newMatrix=x\n for i in range(2,degree+1):\n temp=np.power(x,i)\n newMatrix=np.concatenate((newMatrix,temp),axis=1)\n if bias == True:\n newMatrix=np.append(np.ones((len(newMatrix),1)).astype(int),values=newMatrix,axis=1)\n phi=newMatrix \n \n elif basis == 'sigmoid':\n\n for i in mu:\n if(i==mu[0]):\n temp= (x-i)/s\n phi1=1/(1+np.exp(-temp))\n phi=phi1\n else:\n temp= (x-i)/s\n phi1=1/(1+np.exp(-temp))\n phi=np.concatenate((phi,phi1),axis=1)\n phi=np.append(np.ones((len(phi),1)).astype(int),values=phi,axis=1)\n else: \n assert(False), 'Unknown basis %s' % basis\n\n return phi", "def getsparsesymmat(self,idx_,subi_,subj_,valij_):\n maxlen_ = self.getsymmatinfo((idx_))[1]\n _subi_minlength = (maxlen_)\n if (maxlen_) > 0 and subi_ is not None and len(subi_) != (maxlen_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxlen_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxlen_)\n if (maxlen_) > 0 and subj_ is not None and len(subj_) != (maxlen_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxlen_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _valij_minlength = (maxlen_)\n if (maxlen_) > 0 and valij_ is not None and len(valij_) != (maxlen_):\n raise ValueError(\"Array argument valij is not long enough: Is %d, expected %d\" % (len(valij_),(maxlen_)))\n if isinstance(valij_,numpy.ndarray) and not valij_.flags.writeable:\n raise ValueError(\"Argument valij must be writable\")\n if isinstance(valij_, numpy.ndarray) and valij_.dtype is numpy.dtype(numpy.float64) and valij_.flags.contiguous:\n _valij_copyarray = False\n _valij_tmp = ctypes.cast(valij_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valij_ is not None:\n _valij_copyarray = True\n _valij_np_tmp = numpy.zeros(len(valij_),numpy.dtype(numpy.float64))\n _valij_np_tmp[:] = valij_\n assert 
_valij_np_tmp.flags.contiguous\n _valij_tmp = ctypes.cast(_valij_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valij_copyarray = False\n _valij_tmp = None\n \n res = __library__.MSK_XX_getsparsesymmat(self.__nativep,idx_,maxlen_,_subi_tmp,_subj_tmp,_valij_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _valij_copyarray:\n valij_[:] = _valij_np_tmp", "def set_rhs(self):\n k = self.istore[:, 0]\n ksym = self.stencil.get_symmetric()[k]\n self.rhs[:] = self.feq[k, np.arange(k.size)] - self.feq[ksym, np.arange(k.size)]", "def set_rhs(self):\n k = self.istore[:, 0]\n ksym = self.stencil.get_symmetric()[k]\n self.rhs[:] = self.feq[k, np.arange(k.size)] - self.feq[ksym, np.arange(k.size)]", "def our_own_bvp_solve(f, a, b, n, y0, dim, bc, tol=1e-2):\n\n # interpolate the initial guess function y0 on Chebyshev points of the first kind\n cf0 = []\n for y0_i in y0:\n for thing in np.polynomial.chebyshev.Chebyshev(np.zeros(n), (a, b)).interpolate(y0_i, n, (a, b)):\n cf0.append(thing)\n\n solution = root(lambda u: fun(u, a, b, dim, n, f, bc), cf0, method='lm', tol=tol)\n if not solution.success:\n print('root finding failed')\n\n cf = solution.x\n cf = cf.reshape((dim, cf.size // dim))\n\n return [np.polynomial.chebyshev.Chebyshev(cf[i], (a, b)) for i in range(dim)]", "def kktsolver(x, z, W):\n\n if debug:\n logger.debug(\"Setup kkt solver\")\n logger.debug(\"W\")\n for key in W.keys():\n logger.debug(\"key: %s\" % (key,))\n logger.debug(W[key])\n\n H = hessian(x)\n if debug:\n logger.debug(\"diag H\")\n logger.debug(np.diag(H))\n _H = cvxopt.spdiag(list(np.diag(H))) if H is not None else None\n\n wdi = W[\"di\"]\n Wdi2 = cvxopt.spdiag(cvxopt.mul(wdi, wdi))\n\n S = G.T * Wdi2\n P = S * G\n\n Q = _H + P\n # now, do the cholesky decomposition of Q\n cvxopt.lapack.potrf(Q)\n\n if False and fn is not None:\n logger.debug(\"At setup f(x) = %d\" % (fn(np.array(list(x))),))\n\n def f(x, y, z):\n if False and fn is not None:\n logger.debug(\"f(x) = %d\" % (fn(np.array(list(x))),))\n try:\n # logger.debug(\"Compute x := S * z + x...\")\n cvxopt.blas.gemv(S, z, x, alpha=1.0, beta=1.0) # x = S * z + x\n cvxopt.lapack.potrs(Q, x)\n except BaseException as e:\n logger.debug(exception_to_string(sys.exc_info()))\n raise e\n cvxopt.blas.gemv(G, x, z, alpha=1.0, beta=-1.0) # z = _G * x - z\n z[:] = cvxopt.mul(wdi, z) # scaled z\n # raise NotImplementedError(\"Method Not implemented yet\")\n return f", "def newtonJacobian(self,r):\n #x_vec=np.array(r)\n x=r[0]\n y=r[1]\n jacobi=np.zeros([2,2], float)\n \n \n jacobi[0][0]=(4.0*(self.x_0-x)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][1]=(4.0*(self.y_0-y)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][0]=4.0*(self.x_0-x)*(self.y_0-y)*self.sfunc(x,y)\n jacobi[0][1]=jacobi[1][0]\n #print \"newton jacobian is \",jacobi\n try:\n return mat.inv(jacobi)\n except:\n print \"singular jacobi not invertable\"\n return 0", "def getsparsesymmat(self,idx_,subi,subj,valij): # 3\n maxlen_ = self.getsymmatinfo((idx_))[1]\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is 
not None and len(subi_) != (maxlen_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxlen_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_valij = False\n if valij is None:\n valij_ = None\n else:\n try:\n valij_ = memoryview(valij)\n except TypeError:\n try:\n _tmparr_valij = array.array(\"d\",valij)\n except TypeError:\n raise TypeError(\"Argument valij has wrong type\")\n else:\n valij_ = memoryview(_tmparr_valij)\n _copyback_valij = True\n else:\n if valij_.format != \"d\":\n valij_ = memoryview(array.array(\"d\",valij))\n _copyback_valij = True\n if valij_ is not None and len(valij_) != (maxlen_):\n raise ValueError(\"Array argument valij has wrong length\")\n res = self.__obj.getsparsesymmat(idx_,maxlen_,subi_,subj_,valij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_valij:\n valij[:] = _tmparr_valij\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def collocation(lhs, rhs, basis, nterms, domain=(0, 1), x_col=None):\n x0, x1 = domain\n if x_col is None:\n dx = S(x1 - x0)/(nterms - 2)\n x_col = [dx + dx*cont for cont in range(nterms)]\n x = symbols(\"x\")\n A_mat = zeros(nterms, nterms)\n b_vec = zeros(nterms, 1)\n for row in range(nterms):\n b_vec[row] = rhs(x_col[row])\n for col in range(nterms):\n phi_j = basis(x, col)\n A_mat[row, col] = lhs(phi_j, x).subs(x, x_col[row])\n return A_mat, b_vec", "def eval(self, theta, force=False):\n \n self.update_A_b(theta, force)\n \n if self.b.ndim != 2:\n raise ValueError(\"self.b.ndim not equal to 2.\")\n \n n,p = self.b.shape\n \n #x = numpy.zeros_like(self.b)\n #for k in range(p):\n # x[:,k] = self.solver.backsolve(self.b[:,k], transp='N')\n #return x\n \n # Using the multiple-r.h.s capability of solver.backsolve\n return self.solver.backsolve(self.b)" ]
[ "0.8131871", "0.59714824", "0.5729135", "0.5628742", "0.56091446", "0.55008405", "0.5451734", "0.5344914", "0.53239274", "0.5203088", "0.52024055", "0.5150057", "0.50754064", "0.50556207", "0.5001627", "0.49995735", "0.49937603", "0.49653995", "0.49574485", "0.4943281", "0.49304685", "0.4927475", "0.49267906", "0.49267906", "0.4922202", "0.4910648", "0.4906892", "0.4882394", "0.48795065", "0.48710275" ]
0.81305534
1
Computes conditioning information for the basis matrix. basiscond(self)
def basiscond(self): nrmbasis_ = ctypes.c_double() nrminvbasis_ = ctypes.c_double() res = __library__.MSK_XX_basiscond(self.__nativep,ctypes.byref(nrmbasis_),ctypes.byref(nrminvbasis_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) nrmbasis_ = nrmbasis_.value _nrmbasis_return_value = nrmbasis_ nrminvbasis_ = nrminvbasis_.value _nrminvbasis_return_value = nrminvbasis_ return (_nrmbasis_return_value,_nrminvbasis_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basiscond(self): # 3\n res,resargs = self.__obj.basiscond()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nrmbasis_return_value,_nrminvbasis_return_value = resargs\n return _nrmbasis_return_value,_nrminvbasis_return_value", "def _therm_cond(self):\n xy = dict() # used to label the components e.g 1->CO2,2->N2\n for (i, j) in enumerate(self.component_list, 1):\n xy[i] = j\n\n k_vap = 0\n for i in range(1, len(self.component_list) + 1):\n sumij = 0\n for j in range(1, len(self.component_list) + 1):\n Aij = (1 + (self.visc_d_comp[xy[i]] / self.visc_d_comp[xy[j]])**0.5 *\n (self.mw_comp[xy[j]] / self.mw_comp[xy[i]])**0.25)**2 *\\\n (8 * (1 + self.mw_comp[xy[i]] / self.mw_comp[xy[j]]))**-0.5\n sumij += self.mole_frac_comp[xy[j]] * Aij\n k_vap += self.mole_frac_comp[xy[i]] * self.therm_cond_comp[xy[i]] / sumij\n\n try:\n self.therm_cond = Expression(expr=k_vap,\n doc='Vapor thermal'\n 'conductivity [J/(m.K.s)]')\n except AttributeError:\n self.del_component(self.therm_cond)\n raise", "def check_condor(self):\n\n self.cluster=self.info['mw_run']['1']\n self.norm_with_cross=self.info['mw_run']['4']\n self.condor_req=self.info['mw_run']['11']\n\n #type is automaticaly updated now\n #self.cluster=int(condor)\n #if norm_with_cross==\"F\":\n # self.norm_with_cross=0\n #else:\n # self.norm_with_cross=1", "def set_initial_condition(self):\n X0 = np.array([0.5, 0.5])\n XB = self.bary\n q0 = 1 + np.exp(-0.5*(np.sum((XB-X0[np.newaxis])**2., axis=1))/0.1**2)\n q1 = np.zeros(q0.shape)\n #import pdb; pdb.set_trace()\n return np.array([q0, q1, q1]).T", "def _condition_number(self):\n ev = np.linalg.eig(np.matmul(self.x.swapaxes(-2, -1), self.x))[0]\n return np.sqrt(ev.max(axis=1) / ev.min(axis=1))", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def cJPD(net,nodeTuple,cond_node,cond_state):\r\n \r\n priorA = getCondPrior(net,nodeTuple[0],cond_node,cond_state)\r\n statesA = net.NodeStates(nodeTuple[0],naming = 'titlename') \r\n statesB = net.NodeStates(nodeTuple[1],naming = 'titlename')\r\n numstatesA = [float(i) for i in statesA] \r\n numstatesB = [float(i) for i in statesB] \r\n output = np.zeros((len(statesA)+1,len(statesB)+1))\r\n output[0,1:] = numstatesB\r\n output[1:,0] = numstatesA\r\n for n,i in enumerate(statesA):\r\n if priorA[0][n]== 0: \r\n output[n+1][1:] = np.array([0.0]*len(statesB))\r\n print 'zero belief vector'\r\n else: \r\n output[n+1][1:] = np.array(net.Findings([cond_node,nodeTuple[0]],[cond_state,n],nodeTuple[1],output = 'name',ranks = [False,True]))*priorA[0][n]\r\n return output", "def condition(self) -> ExpressionNode:\n return self.__condition", "def get_conditions(self):\n return (self.temp, self.humid)", "def assemble_matrices(self):\n \n #Pointer reassignment for convenience\n N = self.ngrids\n\n #Begin with a linked-list data structure for the transmissibilities,\n #and one-dimenstional arrays for the diagonal of B and the flux vector\n T = 
lil_matrix((N, N), dtype=np.double)\n B = np.zeros(N, dtype=np.double)\n Q = np.zeros(N, dtype=np.double)\n\n #Read in boundary condition types and values\n bcs = self.input_data['boundary conditions']\n bc_type_1 = bcs['left']['type'].lower()\n bc_type_2 = bcs['right']['type'].lower()\n bc_value_1 = bcs['left']['value']\n bc_value_2 = bcs['right']['value']\n \n #Loop over all grid cells\n for i in range(N):\n\n #Apply left BC\n if i == 0:\n T[i, i+1] = -self.compute_transmissibility(i, i + 1)\n\n if bc_type_1 == 'neumann':\n T[i, i] = T[i,i] - T[i, i+1]\n elif bc_type_1 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i,i] - T[i, i+1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_1\n else:\n pass #TODO: Add error checking here if no bc is specified\n\n #Apply right BC\n elif i == (N - 1):\n T[i, i-1] = -self.compute_transmissibility(i, i - 1)\n\n if bc_type_2 == 'neumann':\n T[i, i] = T[i,i] - T[i, i-1]\n elif bc_type_2 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i, i] - T[i, i-1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_2\n else:\n pass #TODO:Add error checking here if no bc is specified\n\n #If there is no boundary condition compute interblock transmissibilties\n else:\n T[i, i-1] = -self.compute_transmissibility(i, i-1)\n T[i, i+1] = -self.compute_transmissibility(i, i+1)\n T[i, i] = (self.compute_transmissibility(i, i-1) +\n self.compute_transmissibility(i, i+1))\n\n #Compute accumulations\n B[i] = self.compute_accumulation(i)\n\n #If constant-rate wells are present, add them to the flux vector\n if self.rate_well_grids is not None:\n Q[self.rate_well_grids] += self.rate_well_values\n\n \n #Return sparse data-structures\n return (T.tocsr(), \n csr_matrix((B, (np.arange(N), np.arange(N))), shape=(N,N)), \n Q)", "def prepare(self) -> None:\n\n \"\"\"\n Objective function\n Coefficient -2 means that we solve maximization problem (multiple all \n value to -1) and also there are left coverage area and right coverage \n area for each station (2* cov)\n \"\"\"\n\n f = [-2 * self.cov[i] for i in range(self.get_column_num)]\n self._f = np.array(f)\n\n \"\"\" Inequality Constraints\"\"\"\n ineq_cost = [self.cost[i] for i in range(self.get_column_num)]\n self._ineq_constraints = np.array(ineq_cost)\n self._b = np.array(self.cost_limit)\n\n \"\"\" \n There is no equality constraints. \n self._eq_constraints is empty\n self._beq is empty\n \"\"\"", "def _conditions(self, beg=-90, intvl=20, con_type='ori', stim='bar', \n\t\t\t\t\tbiphasic=True, unit='deg', con_list=[], temp_freq = 2):\n\t\t\n\t\t\n\t\tcon_types = ['ori', 'spat_freq', 'temporal_freq', 'chromatic', 'dl_bar']\n\t\tstims = ['bar', 'grating']\n\t\t\n\t\t\n\t\t# Checking if condition and stimulus type recognised. \n\t\tif not con_type.lower() in con_types:\n\t\t\tprint('con_type not recognised. ' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\t\tif not stim.lower() in stims:\n\t\t\tprint('stimulus not recognised. 
' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\t\n\t\tself.parameters['condition_type'] = con_type.lower()\n\t\tself.parameters['condition_unit'] = unit.capitalize()\n\t\tself.parameters['stimulus'] = stim.lower()\n\t\t\n\t\tif stim.lower() == stims[1]:\n\t\t\t# Gratings are GENERALLY not biphasic\n\t\t\tself.parameters['biphasic'] = 'N/A'\n\t\telse:\n\t\t\tself.parameters['biphasic'] = biphasic\n\t\t\n\t\t# Address issue of whether the sampling rate suits teh temporal frequency of \n\t\t# the grating for FFT analysis\n\t\tif stim.lower() == 'grating':\n\t\t\tself.parameters['temp_freq'] = float(temp_freq)\n\t\t\t\n\t\t\t# Sample rate must be a multiple of F1/temp_freq for it to be a frequency measured\n\t\t\t# in the FFT.\n\t\t\tsamp_rate = 1/float(self.bin_width)\n\t\t\t\n\t\t\t\n\t\t\tassert samp_rate % temp_freq == 0., ('Bin_width (%s) is incompatible wih obtaining' \n\t\t\t\t\t\t\t\t\t\t\t\t 'an FFT containing the specified temp_freq (%s). '\n\t\t\t\t\t\t\t\t\t\t\t\t 'The sampling frequency (1/bin_width) must be a'\n\t\t\t\t\t\t\t\t\t\t\t\t 'multiple of the temp_freq. \\n\\n Try as a' \n\t\t\t\t\t\t\t\t\t\t\t\t 'bin_width %s and rerun self._sort().'\n\t\t\t\t\t\t\t\t\t\t\t\t % (self.bin_width, temp_freq, \n\t\t\t\t\t\t\t\t\t\t\t\t\t1/(np.ceil(samp_rate/float(temp_freq))*temp_freq)))\n\t\t\n\t\tself.cond_label = []\n\n\t\t\n\t\tdef circ(ori, bound = 360):\n\t\t\t\"\"\"Func that Ensures all orientation values are between 0 and 360 degrees.\n\t\t\t\"\"\"\n\t\t\t# ori[ori<-360] += 720\n\t\t\t# ori[ori<0] += 360\n\t\t\t# ori[ori>360] -= 360\n\t\t\t# ori[ori>720] -= 720\n\n\n\t\t\treturn ori % bound\n\n\t\t# if list of conditions provided directly\n\t\tif len(con_list) > 0:\n\t\t\t\n\t\t\t# Must match number of conditions\n\t\t\tassert len(con_list) == n_con, ('the number of labels provided '\n\t\t\t\t\t\t\t\t\t\t'manually (%s) does not match the '\n\t\t\t\t\t\t\t\t\t\t'number of conditions (%s).' 
% \n\t\t\t\t\t\t\t\t\t\t(len(con_list), n_con))\n\t\t\t \n\t\t\t# Must all be strings \n\t\t\tassert all(isinstance(l, str) for l in con_list), ('not all the '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'labels provided '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'are strings')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t# List of conditions as strings\n\t\t\tself.cond_label = con_list\n\t\t\t\n\t\t\t# Convert to floats\n\t\t\t# Relying on numpy conversion error should list be unable to convert to float.\n\t\t\tself.conditions = np.array(con_list).astype('float')\n\t\t\t\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t# # Generate list of strings or labels\n\t\t\t# for c in range(n_con):\n\t\t\t# label = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t# else:\n\t\t\t# for c in range(n_con):\n\t\t\t\t\t\n\t\t\t# label = '%s %s' %(self.conditions[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t\t\n\t\t\n\t\t# if condition tpye is orientation\n\t\telif con_type.lower() == con_types[0]:\n\t\t\t\n\t\t\t# Generate full range of conditions\n\t\t\tself.conditions = circ(np.arange(beg, beg+(n_con*intvl), intvl))\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = circ(self.conditions + 180) \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t# Generate list of strings for non-biphasic. 
\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t\t\t\n\t\t# IF condition type is Spat Freq \n\t\telif con_type.lower() == con_types[1]:\n\t\t\tself.conditions = np.arange(beg, beg + (n_con*intvl), intvl)\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\n\t\t\tfor c in range(n_con):\n\t\t\t\tlabel = '%s %s' %(self.conditions[c], self.parameters['condition_unit'])\n\t\t\t\tself.cond_label.append(label)\n\n\t\t# IF condition type is dl_bar\t\t\t\t\t\n\t\telif con_type.lower() == con_types[4]:\n\n\t\t\tself.conditions = np.array([0, 1])\n\t\t\tself.cond_label = ['dark','light']\n\n\t\t\tif len(con_list) > 0:\n\t\t\t\tself.conditions = np.array(con_list).astype('float')\n\n\t\t\t\tif con_list[0] > con_list[1]:\n\t\t\t\t\tself.cond_label = self.cond_label[::-1]\n\n\t\t\tif biphasic:\n\n\t\t\t\tself.conditions2 = self.conditions\n\n\t\t\t\tself.cond_label.extend(\n\t\t\t\t\t[\n\t\t\t\t\t\tcl + ' second'\n\t\t\t\t\t\tfor cl in self.cond_label\n\t\t\t\t\t]\t\n\t\t\t\t\t)\n\n\n\n\t\t# if condition type is not predefined in this method, presume linear range \n\t\telif not con_type.lower() in con_types:\n\t\t\t\n\t\t\tself.conditions = np.arange(beg, beg+(n_con*intvl), intvl)\n\n\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\n\t\t\telse:\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\t\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)", "def condition(self, evidence):\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n cvars = [ v for v in self.v if v in evidence ]\n return Factor(self.v - cvars, self.t[ax]) # forces table copy in constructor", "def build_basis(self):\n if self.debug:\n print('sps_basis: rebuilding basis')\n # Setup the internal component basis arrays\n inwave = self.ssp.wavelengths\n nbasis = len(np.atleast_1d(self.params['mass']))\n self.nbasis = nbasis\n # nbasis = ( len(np.atleast_1d(self.params['zmet'])) *\n # len(np.atleast_1d(self.params['tage'])) )\n self.basis_spec = np.zeros([nbasis, len(inwave)])\n self.basis_mass = np.zeros(nbasis)\n\n i = 0\n tesc = self.params['dust_tesc']\n dust1, dust2 = self.params['dust1'], self.params['dust2']\n for j, zmet in enumerate(self.params['zmet']):\n for k, tage in enumerate(self.params['tage']):\n # get the intrinsic spectrum at this metallicity and age\n if self.safe:\n # do it using compsp\n if self.ssp._zcontinuous > 0:\n self.ssp.params['logzsol'] = zmet\n else:\n self.ssp.params['zmet'] = zmet\n w, spec = self.ssp.get_spectrum(tage=tage, peraa=True)\n mass = self.ssp.stellar_mass\n else:\n # do it by hand. 
Faster but dangerous\n spec, mass, lbol = self.ssp.ztinterp(zmet, tage, peraa=True)\n self.basis_spec[i, :] = spec\n self.basis_mass[i] = mass\n i += 1\n self.basis_dirty = False", "def calculate_strain(self):\n\t\n\tself.Hpij = np.zeros((3,3,self.N),dtype=np.complex_)\n\tself.Hcij = np.zeros((3,3,self.N),dtype=np.complex_)\n\t\n\tcalc_Hcp_ij(self)\t\n\n\tconstruct_basis_tensors(self)\n\n\tcalc_Hij(self)\n\t\n\tcontract_tenors(self)\n\n\treturn", "def generate_condition_data(self):\n # set 'Conditions' column to NA\n self.output['Conditions'] = 'NA'\n\n # instantiate new MarkovChain object\n MC = MarkovChain()\n\n # apply forecast function on 'Conditions' column based on temperature\n # and humidity values for each observation period\n params = self.output[[\"Temperature\", \"Humidity\"]]\n self.output[['Conditions']] = params.apply(\n lambda x: MC.forecast_weather(x.values[0], x.values[1]), axis=1)", "def get_condition(self) -> dict:\n url = self.base_url + \"/condition\"\n condition = self._session.get(url).json()\n keys = [\"bandwidth\", \"latency\", \"jitter\", \"loss\"]\n result = {k: v for (k, v) in condition.items() if k in keys}\n return result", "def condition_number(self):\n return self._condition_number", "def conditions():\n pass", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def getIntegralConditions(self):\n return self._getConditions(restrict=['CI'])", "def mi(self, lhs, rhs, cond=None):\n\t\tbins = np.amax(data, axis=0) # read levels for each variable\n\t\tif len(bins) == 1:\n\t\t\thist,_ = np.histogramdd(data, bins=(bins)) # frequency counts\n\t\t\tPx = hist/hist.sum()\n\t\t\tMI = -1 * np.sum( Px * np.log( Px ) )\n\t\t\treturn round(MI, 4)\n\t\t\t\n\t\tif len(bins) == 2:\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional==True:\n\t\t\t# CHECK FOR > 3 COLUMNS -> concatenate Z into one column\n\t\t\tif len(bins) > 3:\n\t\t\t\tdata = data.astype('str')\n\t\t\t\tncols = len(bins)\n\t\t\t\tfor i in range(len(data)):\n\t\t\t\t\tdata[i,2] = ''.join(data[i,2:ncols])\n\t\t\t\tdata = data.astype('int')[:,0:3]\n\n\t\t\tbins = np.amax(data,axis=0)\n\t\t\thist,_ = np.histogramdd(data, bins=bins) # frequency counts\n\n\t\t\tPxyz = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPz = np.sum(Pxyz, axis = (0,1)) # P(Z)\n\t\t\tPxz = np.sum(Pxyz, axis = 1) # P(X,Z)\n\t\t\tPyz = np.sum(Pxyz, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxy_z = Pxyz / (Pz+1e-7) # P(X,Y | Z) = P(X,Y,Z) / P(Z)\n\t\t\tPx_z = Pxz / (Pz+1e-7) 
# P(X | Z) = P(X,Z) / P(Z)\t\n\t\t\tPy_z = Pyz / (Pz+1e-7) # P(Y | Z) = P(Y,Z) / P(Z)\n\n\t\t\tPx_y_z = np.empty((Pxy_z.shape)) # P(X|Z)P(Y|Z)\n\t\t\tfor i in range(bins[0]):\n\t\t\t\tfor j in range(bins[1]):\n\t\t\t\t\tfor k in range(bins[2]):\n\t\t\t\t\t\tPx_y_z[i][j][k] = Px_z[i][k]*Py_z[j][k]\n\t\t\tPxyz += 1e-7\n\t\t\tPxy_z += 1e-7\n\t\t\tPx_y_z += 1e-7\n\t\t\tMI = np.sum(Pxyz * np.log(Pxy_z / (Px_y_z)))\n\t\t\t\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional == False:\n\t\t\tdata = data.astype('str')\n\t\t\tncols = len(bins)\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdata[i,1] = ''.join(data[i,1:ncols])\n\t\t\tdata = data.astype('int')[:,0:2]\n\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def calculateElementBoundaryCoefficients(self):\n pass", "def cond_dict(calib, F, t, p):\n try:\n Conductivity = []\n f = [x/1000 for x in F]\n for F_0, t_0, p_0 in zip(f, t, p):\n temp = ((calib['G'] + calib['H'] * math.pow(F_0,2)\n + calib['I'] * math.pow(F_0,3)\n + calib['J'] * math.pow(F_0,4))\n / (1 + calib['CTcor'] * t_0 + calib['CPcor'] * p_0))\n temp = round(temp, 5)\n Conductivity.append(temp)\n #single mode\n except:\n f = F/1000\n Conductivity = ((calib['G'] + calib['H'] * math.pow(f,2)\n + calib['I'] * math.pow(f,3)\n + calib['J'] * math.pow(f,4))\n / (1 + calib['CTcor'] * t + calib['CPcor'] * p))\n Conductivity = round(Conductivity,5)\n return Conductivity", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.hc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.Tcol[self.layers]\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.T[\n self.layers] # Pretty cool that this works, really\n return mat, rhs", "def _get_conditions(self):\n return self.__conditions", "def _get_conditions(self):\n return self.__conditions", "def _get_conditions(self):\n return self.__conditions" ]
[ "0.7401494", "0.64921606", "0.57574993", "0.5757078", "0.57382345", "0.5737791", "0.57173383", "0.5618264", "0.56107277", "0.56098473", "0.5554291", "0.5542242", "0.55095696", "0.54061496", "0.53822345", "0.53605485", "0.53488797", "0.53358144", "0.5334709", "0.5314764", "0.52913934", "0.5286889", "0.5286514", "0.5286514", "0.5269763", "0.5242736", "0.5239046", "0.5238498", "0.5238498", "0.5238498" ]
0.7459991
0
Appends a number of constraints to the optimization task. appendcons(self,num_)
def appendcons(self,num_): res = __library__.MSK_XX_appendcons(self.__nativep,num_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendcons(self,num_): # 3\n res = self.__obj.appendcons(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_constraint(self, constraint):\n constraint_type = constraint[0]\n if constraint_type == 'time':\n dependent_variable = constraint[-2]\n dependee_variable = constraint[-1]\n dependent_index = self.subvariable_name.index(dependent_variable)\n dependee_index = self.subvariable_name.index(dependee_variable)\n constraint[-2] = self.value[dependent_index]\n constraint[-1] = self.value[dependee_index]\n if constraint_type in ['threshold', 'count']:\n threshold_variable = constraint[-1]\n threshold_index = self.subvariable_name.index(threshold_variable)\n constraint[-1] = self.value[threshold_index]\n if constraint_type == 'only_one':\n onlyone_variable = constraint[-1]\n onlyone_index = self.subvariable_name.index(onlyone_variable)\n constraint[-1] = self.value[onlyone_index]\n if constraint_type in self.constraint.keys():\n self.constraint[constraint_type] += [constraint[1:]]\n else:\n self.constraint[constraint_type] = [constraint[1:]]", "def add_constraint(self, constraint, problem):\n problem += constraint", "def appendvars(self,num_):\n res = __library__.MSK_XX_appendvars(self.__nativep,num_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_constraints_based_on_task(self):\n # Note this method is only called when a task is found\n for counter, agent in enumerate(self.agents):\n if len(agent.task_list) > 0: # task has been chosen\n last_element = agent.task_list[-1]\n self.graph.add_movement_constraint_by_name(self.tasks[last_element].getName(), weight=self.t)", "def add_constraints(self, constraints):\n for const in constraints:\n self.add_constraint(const.type, const.column, const.check_clause)", "def addConstraint(constraint, problem):\n problem += constraint", "def add_constraint(name, indexes, constraint_func):\n name_base = name\n for _ in range(len(indexes)):\n name_base += \"_{}\"\n\n for index in itertools.product(*indexes):\n name = name_base.format(*index)\n con = constraint_func(index)\n\n constraints.append((con, name))", "def add_constraint(self, constraint):\n self.constraints.append(constraint)", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints", "def appendvars(self,num_): # 3\n res = self.__obj.appendvars(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_constraint(self, var1, var2, constraint_fn) :\n self.constraints.append(Constraint(var1, var2, constraint_fn))\n return self", "def add_constraint(self, constraint_type, **kwargs):\n if constraint_type == \"custom\":\n self.constraints += tuple(kwargs.values())[0]\n elif constraint_type == \"weight\":\n bound, leverage = self.const_creator.create_constraint(constraint_type, **kwargs)\n self.bounds = bound\n self.leverage = kwargs['leverage']\n self.constraints[0] = leverage[0] # Total Leverage is always the first constraint\n else:\n self.constraints += self.const_creator.create_constraint(constraint_type, **kwargs)", "def __addRowConstraints(self):\n for y in range(self.height):\n plusTarget = self.rowPlusCounts[y]\n minusTarget = self.rowMinusCounts[y]\n plusTotal = 0\n minusTotal = 0\n for x in range(self.width):\n g = self.grid[(x, y)]\n plusTotal = plusTotal + If(g == Magnets.PLUS, 1, 0)\n 
minusTotal = minusTotal + If(g == Magnets.MINUS, 1, 0)\n if plusTarget != None:\n self.solver.add(plusTotal == plusTarget)\n if minusTarget != None:\n self.solver.add(minusTotal == minusTarget)", "def addConstraint(self, conName, compIDs=None, lower=-1e20, upper=1e20, dvIndex=0):\n if compIDs is not None:\n # Make sure CompIDs is flat and get element numbers on each proc corresponding to specified compIDs\n compIDs = self._flatten(compIDs)\n else:\n nComps = self.meshLoader.getNumComponents()\n compIDs = list(range(nComps))\n\n constrObj = self._createConstraint(dvIndex, compIDs, lower, upper)\n if constrObj.nCon > 0:\n self.constraintList[conName] = constrObj\n success = True\n else:\n self._TACSWarning(\n f\"No adjacent components found in `compIDs`. Skipping {conName}.\"\n )\n success = False\n\n return success", "def SetPRCatConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for line in tot :\n for val in line :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def addConstraint(self, *args):\n return _libsbml.Model_addConstraint(self, *args)", "def create_constraints(self, courses):\n for i, course1 in enumerate(courses):\n for j, course2 in enumerate(courses):\n if i <= j:\n continue\n self.p.add_constraint(section_constraint, [course1, course2])\n self.p.add_constraint(self.time_conflict, [course1])", "def constraints(self, x):\n pass", "def _constraints_task_valid(self):\n def rule(model):\n \"\"\"\n Bind the tail entries to zero\n \"\"\"\n num = self.num_timeslots\n ind_j = model.tasks\n total = sum(model.A2[num-1, j] for j in ind_j)\n total += sum(model.A3[num-1, j] for j in ind_j)\n total += sum(model.A4[num-1, j] for j in ind_j)\n total += sum(model.A3[num-2, j] for j in ind_j)\n total += sum(model.A4[num-2, j] for j in ind_j)\n total += sum(model.A4[num-3, j] for j in ind_j)\n return None, total, 0\n\n self.model.constrain_tail = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots\n ind_j = model.tasks\n total = sum(model.A[i, j] * (1-self.valid[i, j]) for i in ind_i\n for j in ind_j)\n total += sum(model.A2[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n total += sum(model.A3[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid0 = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots2\n ind_j = model.tasks\n inv = 1-self.valid\n total = sum(\n model.A2[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A3[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots3\n ind_j = model.tasks\n total += sum(\n model.A3[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots4\n ind_j = model.tasks\n total += sum(\n model.A4[i, j] * inv[i + 3, j] for i in ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid1 = Constraint(rule=rule)", "def add_pair (self, first, second):\n self.constraints_.append ((first, second))", "def addConstrs(self, constrs, name=''):\n ...", "def add_constraint(self, constraint):\n self._ckey += 1\n self.constraints[self._ckey] = constraint", "def 
_constraints_nonoverlapping_tasks(self):\n\n def rule(model, i):\n total = sum(model.A[i, j] for j in model.tasks)\n total += sum(model.A2[i, j] for j in model.tasks)\n total += sum(model.A3[i, j] for j in model.tasks)\n total += sum(model.A4[i, j] for j in model.tasks)\n if i > 0:\n total += sum(model.A2[i - 1, j] for j in model.tasks)\n total += sum(model.A3[i - 1, j] for j in model.tasks)\n total += sum(model.A4[i - 1, j] for j in model.tasks)\n if i > 1:\n total += sum(model.A3[i - 2, j] for j in model.tasks)\n total += sum(model.A4[i - 2, j] for j in model.tasks)\n if i > 2:\n total += sum(model.A4[i - 3, j] for j in model.tasks)\n return 0, total, 1\n\n self.model.constrain_nonoverlapping = Constraint(self.model.timeslots,\n rule=rule)", "def number_of_constraints(self):\n return len(self.constraints)", "def constraint(self, c):\n self.add_constraint(c)", "def _createConstraint(self, dvIndex, compIDs, lbound, ubound):\n size = self.comm.size\n rank = self.comm.rank\n # Gather the dv mapping from each proc\n globalToLocalDVNumsOnProc = self.comm.gather(self.globalToLocalDVNums, root=0)\n # Assemble constraint info on root proc\n if rank == 0:\n # Create a list of lists that will hold the sparse data info on each proc\n rowsOnProc = [[] for _ in range(size)]\n colsOnProc = [[] for _ in range(size)]\n valsOnProc = [[] for _ in range(size)]\n conCount = 0\n foundCompPairs = []\n # Loop through all adjacent component pairs\n for compPair in self.adjacentComps:\n # Check if they are in the user provided compIDs\n if compPair[0] in compIDs and compPair[1] in compIDs:\n # Add comp pair to list\n foundCompPairs.append(compPair)\n # We found a new constraint\n for i, comp in enumerate(compPair):\n # Get the TACS element object associated with this compID\n elemObj = self.meshLoader.getElementObject(comp, 0)\n elemIndex = 0\n # Get the dvs owned by this element\n globalDvNums = elemObj.getDesignVarNums(elemIndex)\n # Check if specified dv num is owned by each proc\n for proc_i in range(size):\n globalToLocalDVNums = globalToLocalDVNumsOnProc[proc_i]\n if globalDvNums[dvIndex] in globalToLocalDVNums:\n globalDVNum = globalDvNums[dvIndex]\n localDVNum = globalToLocalDVNums[globalDVNum]\n rowsOnProc[proc_i].append(conCount)\n colsOnProc[proc_i].append(localDVNum)\n if i == 0:\n valsOnProc[proc_i].append(1.0)\n else:\n valsOnProc[proc_i].append(-1.0)\n break\n conCount += 1\n\n else:\n rowsOnProc = None\n colsOnProc = None\n valsOnProc = None\n conCount = 0\n foundCompPairs = None\n\n # Scatter local sparse indices/values to remaining procs\n rows = self.comm.scatter(rowsOnProc, root=0)\n cols = self.comm.scatter(colsOnProc, root=0)\n vals = self.comm.scatter(valsOnProc, root=0)\n\n # Get local sparse matrix dimensions\n foundCompPairs = self.comm.bcast(foundCompPairs, root=0)\n conCount = self.comm.bcast(conCount, root=0)\n nLocalDVs = self.getNumDesignVars()\n\n constrObj = SparseLinearConstraint(\n self.comm, rows, cols, vals, conCount, nLocalDVs, lbound, ubound\n )\n constrObj.compPairs = foundCompPairs\n\n # Create linear constraint object\n return constrObj", "def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))", "def constraints(self):\n ..." ]
[ "0.71618354", "0.5837507", "0.57036966", "0.5674323", "0.5663676", "0.564805", "0.5605173", "0.5550933", "0.55144083", "0.544833", "0.5390708", "0.5379284", "0.5327178", "0.5318046", "0.5295975", "0.529086", "0.52815723", "0.5280168", "0.527523", "0.52032757", "0.51928747", "0.5139332", "0.5134489", "0.51184255", "0.5115553", "0.5091473", "0.5083742", "0.508356", "0.5057673", "0.50483423" ]
0.7185383
0
Appends a number of variables to the optimization task. appendvars(self,num_)
def appendvars(self,num_): res = __library__.MSK_XX_appendvars(self.__nativep,num_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendvars(self,num_): # 3\n res = self.__obj.appendvars(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def addVars(self, *indexes, **kwargs):\n ...", "def add_variables(self, variables):\n for variable in variables:\n self.variables.append(variable)", "def add_variables(self, n_variables, lb=None, ub=None, var_type=None):\n curr_n_vars = self.problem.variables.get_num()\n\n lb = convert_cplex_val(lb)\n ub = convert_cplex_val(ub)\n\n if var_type.lower() == \"real\" or var_type.lower() == \"continuous\":\n vtype = cplex.Cplex.variables.type.continuous\n\n elif var_type.lower() == \"int\" or var_type.lower() == \"integer\":\n vtype = cplex.Cplex.variables.type.integer\n\n elif var_type.lower() == \"binary\" or var_type.lower() == \"bool\" or var_type.lower() == \"boolean\":\n vtype = cplex.Cplex.variables.type.binary\n\n elif var_type.lower() == \"auto\" or var_type is None:\n vtype = cplex.Cplex.variables.type.binary\n\n else:\n raise Exception(\"Vartype '{}' unsupported.\".format(var_type))\n\n if lb is not None and ub is not None:\n self.problem.variables.add(\n lb=[ lb ] * n_variables,\n ub=[ ub ] * n_variables,\n types=[ vtype ] * n_variables)\n\n elif lb is not None:\n self.problem.variables.add(\n lb=[ lb ] * n_variables,\n types=[ vtype ] * n_variables)\n\n elif ub is not None:\n self.problem.variables.add(\n ub=[ ub ] * n_variables,\n types=[ vtype ] * n_variables)\n\n else:\n self.problem.variables.add(\n types=[ vtype ] * n_variables)\n\n # Return the 0-based indexes of the new variables\n new_var_idxs = xrange(curr_n_vars, curr_n_vars + n_variables)\n return new_var_idxs", "def num_vars(self):\n return self.nvars", "def num_vars(self):\n return self._nvars", "def set_obs(self, num_obs):\n curr_obs = self._nobs\n if num_obs < curr_obs:\n raise ValueError(\"num_obs must be >= \" + str(curr_obs))\n if num_obs == curr_obs:\n return\n isstrvar = self._isstrvar\n empty_row = ['' if isstrvar(i) else MISSING for i in range(self._nvar)]\n self._varvals += [copy.copy(empty_row) \n for _ in range(num_obs - curr_obs)]\n self._nobs = num_obs\n self._changed = True\n # Need to clear srtlist. If there are string variables, there \n # might now be empty strings after non-empty string. If there \n # are numerical variables with extended missing, there will now \n # be \".\" missing after extended missing. 
Issue pointed out at\n # http://www.stata.com/statalist/archive/2013-08/msg00576.html\n self._srtlist = [None]*self._nvar", "def append2ncfile(dn2t,var,num):\n print(\"appending..\")\n dn2t[num] = var", "def add_variable(self, name):\n self.all_variables.add(name)", "def appendbarvars(self,dim_):\n num_ = None\n if num_ is None:\n num_ = len(dim_)\n elif num_ != len(dim_):\n raise IndexError(\"Inconsistent length of array dim\")\n if dim_ is None:\n raise ValueError(\"Argument dim cannot be None\")\n if dim_ is None:\n raise ValueError(\"Argument dim may not be None\")\n if isinstance(dim_, numpy.ndarray) and dim_.dtype is numpy.dtype(numpy.int32) and dim_.flags.contiguous:\n _dim_copyarray = False\n _dim_tmp = ctypes.cast(dim_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dim_ is not None:\n _dim_copyarray = True\n _dim_np_tmp = numpy.zeros(len(dim_),numpy.dtype(numpy.int32))\n _dim_np_tmp[:] = dim_\n assert _dim_np_tmp.flags.contiguous\n _dim_tmp = ctypes.cast(_dim_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dim_copyarray = False\n _dim_tmp = None\n \n res = __library__.MSK_XX_appendbarvars(self.__nativep,num_,_dim_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))", "def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))", "def add_var(self, name, comp):\n self._main_model.add_var(name, comp)", "def add_vars(self, size):\n variables = {'ice_gen': cvx.Variable(shape=size, name='ice_gen', nonneg=True),\n 'on_ice': cvx.Variable(shape=size, boolean=True, name='on_ice')}\n return variables", "def put_var_param(self, var_type, num_vars):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not recognized\".format(var_type))\n ierr = exolib.py_expvp(self.exoid, var_type.lower(), num_vars)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var params\")", "def appendbarvars(self,dim): # 3\n num_ = None\n if num_ is None:\n num_ = len(dim)\n elif num_ != len(dim):\n raise IndexError(\"Inconsistent length of array dim\")\n if num_ is None: num_ = 0\n if dim is None: raise TypeError(\"Invalid type for argument dim\")\n if dim is None:\n dim_ = None\n else:\n try:\n dim_ = memoryview(dim)\n except TypeError:\n try:\n _tmparr_dim = array.array(\"i\",dim)\n except TypeError:\n raise TypeError(\"Argument dim has wrong type\")\n else:\n dim_ = memoryview(_tmparr_dim)\n \n else:\n if dim_.format != \"i\":\n dim_ = memoryview(array.array(\"i\",dim))\n \n res = self.__obj.appendbarvars(num_,dim_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def do_add(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['AddingVariablesList']))\n\t\t'''\n\n\t\t#Apply\t\n\t\tself.map('append',map(\n\t\t\t\t\t\t\t\t\tlambda __AddingVariable:\n\t\t\t\t\t\t\t\t\t{'LiargVariablesList':[__AddingVariable]},\n\t\t\t\t\t\t\t\t\tself.AddingVariablesList\n\t\t\t\t\t\t\t\t)\n\t\t\t\t)", "def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}", "def calculate_vars(self):\n pass", "def nvar(self):\n return len(self.__vars)", "def add_vars(size):\n return {'regu_c': cvx.Variable(shape=size, name='regu_c'),\n 'regd_c': cvx.Variable(shape=size, name='regd_c'),\n 'regu_d': cvx.Variable(shape=size, name='regu_d'),\n 'regd_d': cvx.Variable(shape=size, 
name='regd_d')}", "def create_variables(self):\n self.create_weight_variable(self.input_size + [self.hidden_size[0]], name=\"W1\")\n\n self.create_bias_variable((1, self.hidden_size[0]), name=\"b1\")\n\n for i in range(self.n_hidden-1):\n self.create_weight_variable([self.hidden_size[i], self.hidden_size[i+1]], \n name=\"W\"+str(i+2))\n\n self.create_bias_variable((1, self.hidden_size[i+1]), name=\"b\"+str(i+2))\n\n for i in range(len(self.output_size)):\n self.create_weight_variable([self.hidden_size[-1], self.output_size[i]], name=\"Wo_%s\"%i)\n\n self.create_bias_variable((1, self.output_size[i]), name=\"bo_%s\"%i)", "def buildMainVars(self, model, n, name=\"main_cells\"):\n import gurobipy as gb\n ASSERT_TYPE(model, gb.Model)\n lb = 0 if self.nnls else -gb.GRB.INFINITY\n # First index is cell (among non-zero), second index is child\n two_d_vars: gb.MVar = model.addMVar((int(n), int(self.childGeoLen)), vtype=gb.GRB.CONTINUOUS, lb=lb, name=name)\n return two_d_vars", "def push(self, **vars):\n self._variable_stack.append(dict(self._variables))\n self.update(**vars)", "def set_node_variable_number(self, number):\n if number == 0: # pragma: no cover\n return\n\n self._f.dimensions[\"num_nod_var\"] = number\n\n self._f.create_variable(\n \"name_nod_var\", (\"num_nod_var\", \"len_name\"),\n dtype=\"|S1\", **self._comp_opts)\n\n for _i in range(number):\n name = \"vals_nod_var%i\" % (_i + 1)\n self._f.create_variable(\n name, (\"time_step\", \"num_nodes\"),\n dtype=self.__f_dtype, **self._comp_opts)", "def put_var_names(self, var_type, num_vars, var_names):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not recognized\".format(var_type))\n # var names must all be of same length due to Fortran restrictions\n var_names = [\"{0:{1}s}\".format(x, MAX_STR_LENGTH)[:MAX_STR_LENGTH]\n for x in var_names]\n ierr = exolib.py_expvan(self.exoid, var_type.lower(), var_names)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var names\")", "def vars_add ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) + float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## shortcut \n if 0 == var1 : return var2 ## SHORTCUT\n #\n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_add ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_add ( var1 , var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Addition ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def addVariable(self, name, value, save = False):\r\n setattr(self, name, value)\r\n if save and name not in self.variables:\r\n self.variables.append(name)", "def variables_num(self):\n return 1", "def push(self, number_of_names):\n self.local_variables = EnvironmentLevel(self.local_variables)\n self.local_types = EnvironmentLevel(self.local_types)" ]
[ "0.7904469", "0.59922993", "0.5989691", "0.5909687", "0.5740387", "0.56843954", "0.5554662", "0.55087805", "0.5492639", "0.54744667", "0.5469596", "0.5469596", "0.5439085", "0.54265416", "0.5411829", "0.54096377", "0.5366153", "0.5350174", "0.53359", "0.53321993", "0.5321142", "0.52871245", "0.52830297", "0.52681446", "0.52451015", "0.5231518", "0.51841635", "0.51763934", "0.51733416", "0.51403445" ]
0.77544904
1
Removes a number of constraints. removecons(self,subset_)
def removecons(self,subset_): num_ = None if num_ is None: num_ = len(subset_) elif num_ != len(subset_): raise IndexError("Inconsistent length of array subset") if subset_ is None: raise ValueError("Argument subset cannot be None") if subset_ is None: raise ValueError("Argument subset may not be None") if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous: _subset_copyarray = False _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subset_ is not None: _subset_copyarray = True _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32)) _subset_np_tmp[:] = subset_ assert _subset_np_tmp.flags.contiguous _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subset_copyarray = False _subset_tmp = None res = __library__.MSK_XX_removecons(self.__nativep,num_,_subset_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removecons(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecons(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecones(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecones(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecones(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removecones(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removevars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = 
None\n \n res = __library__.MSK_XX_removevars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)", "def removevars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removevars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removebarvars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def removebarvars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removebarvars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_unary_constraint(self, var):\n\n del self.__constraints[var]", "def remove(self, *args):\n return _libsbml.ListOfConstraints_remove(self, *args)", "def _remove_associated_consvar(self, all_cons_subclasses, all_var_subclasses,\n collection):\n\n if not hasattr(collection, '__iter__'):\n collection = [collection]\n\n strfy = lambda x:x if isinstance(x, str) else x.id\n\n for cons_type in all_cons_subclasses:\n for element in collection:\n try:\n cons = 
self._cons_kinds[cons_type.__name__].get_by_id(strfy(element))\n self.remove_constraint(cons)\n except KeyError as e:\n pass\n for var_type in all_var_subclasses:\n for element in collection:\n try:\n var = self._var_kinds[var_type.__name__].get_by_id(strfy(element))\n self.remove_variable(var)\n except KeyError as e:\n pass", "def removeConstraint(self, *args):\n return _libsbml.Model_removeConstraint(self, *args)", "def removeConstraint(self, constraint: Constraint, /) -> None:\n ...", "def removeBounded(self, bounds):\n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n lst=[]\n for i in range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and (y>y1 and y<y2): \n lst.append(i)\n self.removeMultiple(lst)\n return", "def _discretize(self, constraints_object):\n pass", "def prune(self,domains,constraint):\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n new_domains = deepcopy(domains)\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n new_domains[left_var[0]] = [right_val[0]]\n \n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n new_set = set(new_domains[left_var[0]]) & set(new_domains[right_var[0]])\n new_domains[left_var[0]] = list(new_set)\n new_domains[right_var[0]] = list(new_set)\n\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # print(l,r)\n # print(new_domains)\n # print(constraint)\n\n for var,mult in zip(left_var,left_const_mult):\n max_var = max(domains[var])\n comp = (r-(l-mult*max_var)) / mult\n for elem in domains[var]:\n if elem < comp:\n new_domains[var].remove(elem)\n\n for var,mult in zip(right_var,right_const_mult):\n min_var = min(domains[var])\n comp = (l-(r-mult*min_var)) / mult\n for elem in domains[var]:\n if elem > comp:\n new_domains[var].remove(elem)\n\n # for i,domain in enumerate(new_domains):\n # if len(domain) == 0:\n # print(i,l,r)\n # print(\"Old:\",domains)\n # print(\"New:\",new_domains)\n # print(domains)\n # print(constraint)\n # print(\"------------------------\")\n # raise SystemError(\"Domain is Empty!!\")\n\n return new_domains", "def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)", "def remove_constraints(mvi, surfaces):\n\n state = save_state(mvi)\n indices = [surface.index for surface in surfaces]\n lam0 = np.delete(mvi.lambda0, indices)\n lam1 = np.delete(mvi.lambda1, indices)\n\n mvi.system.hold_structure_changes()\n for surface in surfaces:\n surface.deactivate_constraint()\n mvi.system.resume_structure_changes() \n\n restore_state(mvi, state)\n mvi.lambda0 = lam0\n mvi.lambda1 = lam1", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n 
significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def removeSkeletalConstraints(self):\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def slice(self, evidence={}):\n return self.condition(evidence)\n \n \n\n# def eliminate(self, elimVars, elimOp):\n # TODO: awkward way to define this; convert to more direct implementation?\n for v in elimVars:\n if len(self.markovBlanket(v)) > 2: raise ValueError(\"Cannot eliminate {} with {} (>2) neighbors\".format(v,len(self.markovBlanket(v))))\n flist = self.factorsWith(v)\n gm_model = GraphModel(flist); print(gm_model); gm_model.eliminate([v],elimOp)\n fnew = gm_model.factors[0]\n self.removeFactors(flist); # doesn't quite work? numerical roundoff issues?\n self.L[v,:] = 0; self.L[:,v] = 0; self.h[v] = 0; # TODO: better to mark as removed? how?\n self.addFactors([fnew])\n # TODO: \"remove\" variable by setting states = 0? \"known value\" = 0?", "def remove_cond(self, idx):\n model = self._model\n model.beginRemoveRows(ROOT_MIDX, idx, idx)\n del self._conds[idx]\n model.endRemoveRows()\n self.notify_wizard()", "def remove(self, i):\n assert self.apply_remove_point_rules((self._ys[i], self._xs[i])), 'Removal rules are not satisfied'\n\n if len(self.get_raw_xs()) > 5:\n if self.is_settable:\n self._remove_xs(i)\n self._remove_ys(i)\n self.is_changed = True\n else:\n raise ValueError('graph '+str(self.name)+' is not is_settable')\n elif not self.is_raw_data:\n raise ValueError('Must be at least 5 points for interpolation.')", "def eliminate_var(n, g,clq_ind,tree):\r\n l = len(clq_ind) # number of nodes eliminated\r\n \r\n new_ind = scipy.array(g.neighbors(n))\r\n new_clique = g.neighbors(n)\r\n new_clique.append(n) \r\n g.add_edges_from( combinations(new_clique,2) )\r\n \r\n for i,clq in enumerate(clq_ind):\r\n if n in clq:\r\n tree.add_edge(l,i)\r\n clq_ind[i] = scipy.setdiff1d(clq,new_clique)\r\n \r\n clq_ind.append(new_ind)\r\n g.remove_node(n)\r\n tree.node[l]['clique'] = new_clique", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))]", "def clear_categories(self, subset=None, inclusive=None):\n if inclusive is not None:\n inc = inclusive\n else:\n inc = self.is_cat_inclusive\n if subset is None:\n # clear all categories\n self.cats = None\n else:\n # Do not use self[subset].blah = 0 ! 
- SettingWithCopyWarning\n if inc:\n self.cats = self.cats.drop(\n columns=[col for col in self.cats.columns.values if subset in col]\n )\n else:\n self.cats = self.cats.drop(columns=subset)\n if len(self.cat_labels) == 0:\n self.is_categorised = False\n self.is_cat_inclusive = False", "def subsettter(clipsegments, lengthtype):\n if lengthtype == 'twothirds':\n clipsegments.remove('AR8')\n clipsegments.remove('AF13')\n elif lengthtype == 'abouthalf':\n clipsegments.remove('AR8')\n clipsegments.remove('AF13')\n clipsegments.remove('AF7')\n return clipsegments", "def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)", "def _apply_consraint_killers(constraints):\n to_kill, real_constraints = [], []\n for constr in constraints:\n if \"kill\" in constr and len(constr) == 1:\n to_kill.append(constr[\"kill\"])\n else:\n real_constraints.append(constr)\n\n to_kill = set(to_kill)\n\n survivors = []\n for constr in real_constraints:\n if \"id\" not in constr or constr[\"id\"] not in to_kill:\n survivors.append(constr)\n\n present_ids = [constr[\"id\"] for c in real_constraints if \"id\" in constr]\n\n if not to_kill.issubset(present_ids):\n invalid = to_kill.difference(present_ids)\n raise KeyError(f\"You try to kill constraint with non-exsting id: {invalid}\")\n\n return survivors" ]
[ "0.82700276", "0.7332951", "0.72566605", "0.67324287", "0.6559985", "0.64388156", "0.63277817", "0.6175792", "0.614257", "0.5968138", "0.5823671", "0.580427", "0.5789894", "0.57380253", "0.5684563", "0.5578613", "0.55290496", "0.5512659", "0.53915256", "0.5372119", "0.53693765", "0.5367088", "0.5350951", "0.53276014", "0.52809626", "0.5279108", "0.5273784", "0.5233627", "0.5232251", "0.5208824" ]
0.81802654
1
Removes a number of variables. removevars(self,subset_)
def removevars(self,subset_): num_ = None if num_ is None: num_ = len(subset_) elif num_ != len(subset_): raise IndexError("Inconsistent length of array subset") if subset_ is None: raise ValueError("Argument subset cannot be None") if subset_ is None: raise ValueError("Argument subset may not be None") if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous: _subset_copyarray = False _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subset_ is not None: _subset_copyarray = True _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32)) _subset_np_tmp[:] = subset_ assert _subset_np_tmp.flags.contiguous _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subset_copyarray = False _subset_tmp = None res = __library__.MSK_XX_removevars(self.__nativep,num_,_subset_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removevars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removevars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removebarvars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removebarvars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_variables(self):\n self.variables = []", "def del_variables(self, variables):\n variables = [variables] if isinstance(variables, str) else set(variables)\n indices = [\n index\n for index, variable in enumerate(self.variables)\n if variable in variables\n ]\n self.variables = np.delete(self.variables, indices, 0)\n self.cardinality = np.delete(self.cardinality, indices, 0)\n self.inhibitor_probability = [\n prob_array\n for index, prob_array in enumerate(self.inhibitor_probability)\n if index not in indices\n ]", "def keep_var(self, varnames):\n varnames = self._find_vars(varnames, empty_ok=False)\n vars_to_drop = set(self._varlist) - set(varnames)\n if len(vars_to_drop) > 0:\n self.drop_var(vars_to_drop)", "def removedummyvars(self, dummy_var_no):\n self.nodummyvariablelist = [] # Necessary for a list copy\n self.nodummyvariablelist.extend(self.variablelist)\n self.nodummygain 
= self.originalgain.copy()\n self.nodummyconnection = self.originalconnection.copy()\n for index in range(dummy_var_no):\n self.nodummyvariablelist.pop(0)\n self.nodummygain = np.delete(self.nodummygain, 0, 0)\n self.nodummygain = np.delete(self.nodummygain, 0, 1)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 0)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 1)\n\n [r, c] = self.nodummyconnection.shape\n self.nodummy_nodes = r", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def removeVariable(self, name, delete = True):\r\n if name in self.variables:\r\n self.variables.remove(name)\r\n if delete and hasattr(self, name):\r\n delattr(self, name)", "def remove_invariable_features(tX):\n\n features = tX.T\n stds = np.std(features, axis=1)\n indices = np.where(stds == 0)\n new_tX = np.delete(features, indices, 0).T\n return new_tX", "def drop_obs(self, in_ = None, if_ = None, all_obs = False):\n if self._nobs == 0:\n return\n if all_obs and (in_ is not None or if_ is not None):\n raise ValueError(\"all_obs cannot be combined with in_ or if_\")\n if not all_obs and in_ is None and if_ is None:\n raise ValueError(\"must specify one of in_, if_, or all_obs\")\n \n if all_obs:\n self._varvals = []\n self._nobs = 0\n else:\n varvals = self._varvals\n if if_ is None:\n to_drop = [i for i in in_]\n else:\n if in_ is None: in_ = range(self._nobs)\n to_drop = [i for i in in_ if if_(i)]\n to_drop.reverse()\n for i in to_drop:\n del varvals[i]\n self._nobs = len(self._varvals)\n self._changed = True", "def clear(self):\n self.vars = []", "def removecones(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecones(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_invariable_features_grouped(tX_grouped):\n\n new_tX_grouped = []\n for i in range(len(tX_grouped)):\n new_tX_grouped.append(remove_invariable_features(tX_grouped[i]))\n return new_tX_grouped", "def subset(self, new_var_names):\n # make copy of self\n new_shape = self.copy()\n \n # check that new names are a subset of old names\n if not new_var_names <= new_shape._var_names:\n extra_vars = set(new_var_names) - set(new_shape._var_names)\n extra_var_strings = [str(var) for var in extra_vars]\n msg = 'New variables must be a subset of existing variables. 
'\n msg += 'Got extra variables %s' % ', '.join(extra_var_strings)\n raise ValueError(msg)\n\n # drop unneeded vals\n for name in self._var_names:\n if name not in new_var_names:\n new_shape.drop_component(name)\n \n # return\n return new_shape", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def slice(self, evidence={}):\n return self.condition(evidence)\n \n \n\n# def eliminate(self, elimVars, elimOp):\n # TODO: awkward way to define this; convert to more direct implementation?\n for v in elimVars:\n if len(self.markovBlanket(v)) > 2: raise ValueError(\"Cannot eliminate {} with {} (>2) neighbors\".format(v,len(self.markovBlanket(v))))\n flist = self.factorsWith(v)\n gm_model = GraphModel(flist); print(gm_model); gm_model.eliminate([v],elimOp)\n fnew = gm_model.factors[0]\n self.removeFactors(flist); # doesn't quite work? numerical roundoff issues?\n self.L[v,:] = 0; self.L[:,v] = 0; self.h[v] = 0; # TODO: better to mark as removed? how?\n self.addFactors([fnew])\n # TODO: \"remove\" variable by setting states = 0? \"known value\" = 0?", "def removecones(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removecones(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6", "def _delete_slots_and_weights_in_optimizer(self):\n for layer_name, slots in self._tls._slot_variables.items():\n embed_var = self._get_embedding_variable(layer_name)\n embed_var_key = _var_key(embed_var)\n if embed_var_key in self._opt._slots:\n del self._opt._slots[embed_var_key]\n for _, var in slots.items():\n opt_weight_iter = 0\n while opt_weight_iter < len(self._opt._weights):\n if var is self._opt._weights[opt_weight_iter]:\n self._opt._weights.pop(opt_weight_iter)\n break\n else:\n opt_weight_iter += 1\n\n # Delete variables in unique_ids_all_layers.\n for key in list(self._tls._unique_ids_all_layers.keys()):\n del self._tls._unique_ids_all_layers[key]", "def remove_variables(project, env_spec_name, vars_to_remove, prepare_result=None):\n (env_prefix, status) = _prepare_env_prefix(project, env_spec_name, prepare_result, mode=provide.PROVIDE_MODE_CHECK)\n # we allow env_prefix of None, which means the env wasn't created so we won't\n # try to unset any values for the variable.\n if status is not None and not status:\n return status\n\n local_state = LocalStateFile.load_for_directory(project.directory_path)\n for varname in vars_to_remove:\n path_to_variable = _path_to_variable(env_spec_name, 
varname)\n\n if env_prefix is not None:\n _unset_variable(project, env_spec_name, env_prefix, varname, local_state)\n path_to_variable = _path_to_variable(env_spec_name, varname)\n project.project_file.unset_value(path_to_variable)\n project.project_file.save()\n local_state.save()\n\n return SimpleStatus(success=True, description=\"Variables removed from the project file.\")", "def remove_unary_constraint(self, var):\n\n del self.__constraints[var]", "def prune_vars(self, ratios, axis, apply=\"impretive\"):\n axis = axis[0] if isinstance(axis, list) else axis\n global_plan = PruningPlan(self.model.full_name)\n for var, ratio in ratios.items():\n if not global_plan.contains(var, axis):\n plan = self.prune_var(var, axis, ratio, apply=None)\n global_plan.extend(plan)\n if apply == \"lazy\":\n global_plan.apply(self.model, lazy=True)\n elif apply == \"impretive\":\n global_plan.apply(\n self.model,\n lazy=False,\n opt=self.opt,\n prune_type=self.prune_type)\n self.plan = global_plan\n return global_plan", "def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)", "def RemoveVariable(self, e):\n if (not self.mainparent.file_loaded):\n msg = \"An input file must be loaded before a variable can be removed\"\n ShowMessage(msg, kind='warn')\n return\n if (self.mainparent.namelist is None):\n msg = \"Use the menu to select a namelist, first\"\n ShowMessage(msg, kind='info')\n return\n self.mainparent.statusbar.SetStatusText(\"Removing a variable\", 0)\n\n remove_name = AskText(\"Enter variable name to remove\", title=\"Remove Variable\")\n\n if (remove_name is not None):\n self.mainparent.input_file.namelists[self.mainparent.namelist].remove_variable(remove_name)\n\n # redraw stuff\n self.mainparent.statusbar.SetStatusText(\"Removed: {}\".format(remove_name), 0)\n self.mainparent.nmlpanel.update(unset_namelist=False) # update displayed namelist to include new entry", "def reduceQtyVars(nb_min_var:int, dict_values:dict, list_models_var):\n\n dict2 = dict_values.copy()\n #On garde les variables qui ont une freq inferieur au seuil\n dict2 = {k: v for k, v in dict2.items() if v < nb_min_var}\n\n\n list_var_remove = list(dict2.keys())\n list_index_remove = []\n index_value = 0\n for model_var in list_models_var:\n var_in_models = list(model_var.dict_freq_var.keys())\n\n exists_var = any(x in var_in_models for x in list_var_remove)\n if exists_var == True:\n list_index_remove.append(index_value)\n\n index_value =index_value +1\n list_index_remove= reversed(list_index_remove)\n for element in list_index_remove:\n list_models_var.pop(element)\n \n return list_models_var", "def delete_variable(self, name):\n if name not in self._variables:\n logging.warning(\"Pipeline variable '%s' does not exist\", name)\n else:\n if isinstance(name, str):\n 
self._variables.pop(name)\n else:\n for var in name:\n self._variables.pop(var)\n return self", "def remove_features(data, target, fn):\n selected_data = []\n if fn == 'variance':\n sel = VarianceThreshold(threshold=(.1 * (1 - .8)))\n selected_data = sel.fit_transform(data)\n elif fn == 'L1':\n lsvc = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(data, target)\n model = SelectFromModel(lsvc, prefit=True)\n selected_data = model.transform(data)\n\n selected_t = np.transpose(selected_data)\n data_t = np.transpose(data)\n\n i = 0\n kept_cols = []\n removed_cols = []\n for i, col in enumerate(data_t):\n if col not in selected_t:\n removed_cols.append(i)\n else:\n kept_cols.append(i)\n return kept_cols, removed_cols", "def unsetVariable(self):\n return _libsbml.Rule_unsetVariable(self)", "def _remove_associated_consvar(self, all_cons_subclasses, all_var_subclasses,\n collection):\n\n if not hasattr(collection, '__iter__'):\n collection = [collection]\n\n strfy = lambda x:x if isinstance(x, str) else x.id\n\n for cons_type in all_cons_subclasses:\n for element in collection:\n try:\n cons = self._cons_kinds[cons_type.__name__].get_by_id(strfy(element))\n self.remove_constraint(cons)\n except KeyError as e:\n pass\n for var_type in all_var_subclasses:\n for element in collection:\n try:\n var = self._var_kinds[var_type.__name__].get_by_id(strfy(element))\n self.remove_variable(var)\n except KeyError as e:\n pass" ]
[ "0.8907224", "0.78043014", "0.7453004", "0.6692764", "0.6318491", "0.6138598", "0.59088767", "0.58950776", "0.5870665", "0.58485174", "0.5781053", "0.5777436", "0.5639282", "0.5567257", "0.55330926", "0.54802483", "0.5447446", "0.53930205", "0.53908974", "0.5379356", "0.5372259", "0.5337901", "0.5316398", "0.53161573", "0.5314299", "0.53064954", "0.5299361", "0.52731025", "0.52519685", "0.52380514" ]
0.854354
1
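Illustrative usage sketch for the removevars record above. This is an assumption, not part of the dataset row: it presumes the standard MOSEK Optimizer API for Python, where Task.appendvars and Task.removevars exist with the signatures shown in the document field.

import mosek  # assumed dependency

with mosek.Env() as env, env.Task() as task:
    task.appendvars(5)         # create scalar variables x0..x4
    task.removevars([1, 3])    # remove x1 and x3; remaining variables are re-indexed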
Removes a number of symmetric matrices. removebarvars(self,subset_)
def removebarvars(self,subset_): num_ = None if num_ is None: num_ = len(subset_) elif num_ != len(subset_): raise IndexError("Inconsistent length of array subset") if subset_ is None: raise ValueError("Argument subset cannot be None") if subset_ is None: raise ValueError("Argument subset may not be None") if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous: _subset_copyarray = False _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subset_ is not None: _subset_copyarray = True _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32)) _subset_np_tmp[:] = subset_ assert _subset_np_tmp.flags.contiguous _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subset_copyarray = False _subset_tmp = None res = __library__.MSK_XX_removebarvars(self.__nativep,num_,_subset_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removebarvars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removebarvars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removevars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removevars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removevars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removevars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecones(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecones(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecones(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise 
IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removecones(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def purgeHighSparsedFeatures(df,threshold,barplot=False,title=''):\n \n thr = math.floor(df.shape[1] * threshold)\n rowsToDrop = np.array([])\n logger.debug(Sc+'Patient Threshold is %d' % thr) \n logger.debug(Sc+'Matrix dimensions : Rows %d , Columns %d'% (df.shape[0],df.shape[1]))\n #axis_x = np.arange(0,df.shape[0]) \n axis_y = np.array([]) \n numRows = df.shape[0] \n for i in range(1,numRows):\n arr = pd.isnull(df.iloc[i])\n nnan = np.sum(arr) \n axis_y = np.append(axis_y,nnan)\n if (nnan > thr):\n rowsToDrop = np.append(rowsToDrop,i)\n logger.debug ('%d features to drop ' % len(rowsToDrop))\n np.savetxt('debug/sparseFeaturesaxis_y.txt',axis_y)\n #if(barplot):\n # ax.title.set_text(title)\n # ax.bar(axis_x,axis_y) \n #logger.debug('After purge there are %d columns '% df.shape[1])\n return rowsToDrop", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def removedummyvars(self, dummy_var_no):\n self.nodummyvariablelist = [] # Necessary for a list copy\n self.nodummyvariablelist.extend(self.variablelist)\n self.nodummygain = self.originalgain.copy()\n self.nodummyconnection = self.originalconnection.copy()\n for index in range(dummy_var_no):\n self.nodummyvariablelist.pop(0)\n self.nodummygain = np.delete(self.nodummygain, 0, 0)\n self.nodummygain = np.delete(self.nodummygain, 0, 1)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 0)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 1)\n\n [r, c] = self.nodummyconnection.shape\n self.nodummy_nodes = r", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def removecons(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = 
memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecons(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecons(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removecons(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)", "def remove_invariable_features(tX):\n\n features = tX.T\n stds = np.std(features, axis=1)\n indices = np.where(stds == 0)\n new_tX = np.delete(features, indices, 0).T\n return new_tX", "def removeExtraSets(cls, obj, *args, **kwargs):\n for i in pm.polyUVSet(obj, query=True, allUVSetsIndices=True)[1:]:\n name = pm.getAttr(obj + '.uvSet[' + str(i) + '].uvSetName')\n pm.polyUVSet(obj, delete=True, uvSet=name)", "def remove_abs_vars(self):\n self.m.remove(self.bp_abs)\n self.m.remove(self.bn_abs)\n self.m.remove(self.gp_abs)\n self.m.remove(self.gn_abs)\n self.m.remove(self.beta_p)\n self.m.remove(self.beta_n)\n self.m.remove(self.gamma_p)\n self.m.remove(self.gamma_n)", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs", "def removed(bin_arr, extent, intent):\n result = np.copy(bin_arr)\n ree = result[extent]\n ree[:, intent] = 0\n result[extent] = ree\n return result", "def clear_categories(self, subset=None, inclusive=None):\n if inclusive is not None:\n inc = inclusive\n else:\n inc = self.is_cat_inclusive\n if subset is None:\n # clear all categories\n self.cats = None\n else:\n # Do not use self[subset].blah = 0 ! 
- SettingWithCopyWarning\n if inc:\n self.cats = self.cats.drop(\n columns=[col for col in self.cats.columns.values if subset in col]\n )\n else:\n self.cats = self.cats.drop(columns=subset)\n if len(self.cat_labels) == 0:\n self.is_categorised = False\n self.is_cat_inclusive = False", "def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)", "def prune_dims(variances, threshold=0.005):\r\n scale_z = np.sqrt(variances)\r\n return scale_z >= threshold", "def del_variables(self, variables):\n variables = [variables] if isinstance(variables, str) else set(variables)\n indices = [\n index\n for index, variable in enumerate(self.variables)\n if variable in variables\n ]\n self.variables = np.delete(self.variables, indices, 0)\n self.cardinality = np.delete(self.cardinality, indices, 0)\n self.inhibitor_probability = [\n prob_array\n for index, prob_array in enumerate(self.inhibitor_probability)\n if index not in indices\n ]", "def remove(self):\n for i in range(self.min_y+1, self.max_y+1):\n for j in range(self.min_x+1, self.max_x+1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = ' '\n except IndexError:\n pass", "def remove_variables(self):\n self.variables = []", "def trim_features():\n pass", "def remove_unary_constraint(self, var):\n\n del self.__constraints[var]", "def remove_zero_bars(dgm):\r\n inds = dgm[:,0] != dgm[:,1]\r\n return dgm[inds,:]", "def _delete_slots_and_weights_in_optimizer(self):\n for layer_name, slots in self._tls._slot_variables.items():\n embed_var = self._get_embedding_variable(layer_name)\n embed_var_key = _var_key(embed_var)\n if embed_var_key in self._opt._slots:\n del self._opt._slots[embed_var_key]\n for _, var in slots.items():\n opt_weight_iter = 0\n while opt_weight_iter < len(self._opt._weights):\n if var is self._opt._weights[opt_weight_iter]:\n self._opt._weights.pop(opt_weight_iter)\n break\n else:\n opt_weight_iter += 1\n\n # Delete variables in unique_ids_all_layers.\n for key in list(self._tls._unique_ids_all_layers.keys()):\n del self._tls._unique_ids_all_layers[key]", "def RemoveZeroVar(chain):\n\treturn chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]", "def remove_subset_from_set(metaobject, subset_key):\n # If it is not a list, check if subset key in the dictionary and just remove that key\n if not isinstance(metaobject, list):\n if subset_key in metaobject:\n del metaobject[subset_key]\n else:\n for obj in metaobject:\n # Iterate over the list and remove the key from each object if it is there\n if subset_key in obj:\n del obj[subset_key]\n\n return metaobject", "def RemoveZeroVar(chain):\n return chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]" ]
[ "0.8297461", "0.72585315", "0.72363526", "0.5895367", "0.5825849", "0.5779745", "0.5747303", "0.5530704", "0.55015457", "0.5498184", "0.5485593", "0.531922", "0.5292177", "0.5290783", "0.5279861", "0.52415746", "0.52012604", "0.5192966", "0.51445", "0.51120937", "0.509378", "0.5089281", "0.50321746", "0.5016518", "0.49682257", "0.49549922", "0.49549094", "0.48885074", "0.48691818", "0.48621133" ]
0.8265025
1
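Illustrative usage sketch for the removebarvars record above (an assumption, not taken from the dataset row: it presumes the standard MOSEK Optimizer API for Python, where appendbarvars takes a list of matrix orders and removebarvars a list of indices, matching the document field).

import mosek  # assumed dependency

with mosek.Env() as env, env.Task() as task:
    task.appendbarvars([3, 3, 4])   # three symmetric matrix variables of order 3, 3 and 4
    task.removebarvars([0, 2])      # remove the first and third matrix variables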
Removes a number of conic constraints from the problem. removecones(self,subset_)
def removecones(self,subset_): num_ = None if num_ is None: num_ = len(subset_) elif num_ != len(subset_): raise IndexError("Inconsistent length of array subset") if subset_ is None: raise ValueError("Argument subset cannot be None") if subset_ is None: raise ValueError("Argument subset may not be None") if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous: _subset_copyarray = False _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subset_ is not None: _subset_copyarray = True _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32)) _subset_np_tmp[:] = subset_ assert _subset_np_tmp.flags.contiguous _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subset_copyarray = False _subset_tmp = None res = __library__.MSK_XX_removecones(self.__nativep,num_,_subset_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removecones(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecones(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecons(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removecons(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removecons(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removecons(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removevars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = 
None\n \n res = __library__.MSK_XX_removevars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removebarvars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removevars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removevars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def removebarvars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removebarvars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)", "def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = 
self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)", "def clear_categories(self, subset=None, inclusive=None):\n if inclusive is not None:\n inc = inclusive\n else:\n inc = self.is_cat_inclusive\n if subset is None:\n # clear all categories\n self.cats = None\n else:\n # Do not use self[subset].blah = 0 ! - SettingWithCopyWarning\n if inc:\n self.cats = self.cats.drop(\n columns=[col for col in self.cats.columns.values if subset in col]\n )\n else:\n self.cats = self.cats.drop(columns=subset)\n if len(self.cat_labels) == 0:\n self.is_categorised = False\n self.is_cat_inclusive = False", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def __eliminate_unused_constraits (self, objects):\n result = []\n for c in self.constraints_:\n if c [0] in objects and c [1] in objects:\n result.append (c)\n\n return result", "def _remove_associated_consvar(self, all_cons_subclasses, all_var_subclasses,\n collection):\n\n if not hasattr(collection, '__iter__'):\n collection = [collection]\n\n strfy = lambda x:x if isinstance(x, str) else x.id\n\n for cons_type in all_cons_subclasses:\n for element in collection:\n try:\n cons = self._cons_kinds[cons_type.__name__].get_by_id(strfy(element))\n self.remove_constraint(cons)\n except KeyError as e:\n pass\n for var_type in all_var_subclasses:\n for element in collection:\n try:\n var = self._var_kinds[var_type.__name__].get_by_id(strfy(element))\n self.remove_variable(var)\n except KeyError as e:\n pass", "def _subtourelim(model, where):\n if where == GRB.callback.MIPSOL:\n # make a list of edges selected in the solution\n X = model.cbGetSolution(model._vars)\n n = int(sqrt(len(X)))\n selected = [(i,j) for i in range(n) for j in range(n) if X[(i,j)]>0.5]\n\n # find the shortest cycle in the selected edge list\n tour = _subtour(selected,n)\n if len(tour) < n:\n # add a subtour elimination constraint\n expr = quicksum(model._vars[tour[i], tour[j]]\n for i in range(len(tour))\n for j in range(i+1, len(tour)))\n model.cbLazy(expr <= len(tour)-1)", "def remove_unary_constraint(self, var):\n\n del self.__constraints[var]", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs", "def _discretize(self, constraints_object):\n pass", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))]", "def test_remove_pbc_unsegmented():\n ref_array = load_structure(join(data_dir(\"structure\"), \"3o5r.mmtf\"))\n # Center structure in box\n centroid = struc.centroid(ref_array)\n box_center = np.diag(ref_array.box) / 2\n ref_array = struc.translate(ref_array, box_center-centroid)\n # Remove solvent\n ref_array = ref_array[~struc.filter_solvent(ref_array)]\n array = struc.remove_pbc(ref_array)\n\n assert ref_array.equal_annotation_categories(array)\n assert np.allclose(ref_array.coord, array.coord)", "def remove_small_cc(binary, thres=10):\n cc, n_cc = 
measure.label(binary)\n binary2 = np.copy(binary)\n for n in range(1, n_cc + 1):\n area = np.sum(cc == n)\n if area < thres:\n binary2[cc == n] = 0\n return binary2", "def test_remove_pbc_selections(multi_model):\n array = load_structure(join(data_dir(\"structure\"), \"3o5r.mmtf\"))\n if multi_model:\n array = struc.stack([array, array])\n \n struc.remove_pbc(array)\n struc.remove_pbc(array, array.chain_id[0])\n struc.remove_pbc(array, struc.filter_amino_acids(array))\n struc.remove_pbc(array, [struc.filter_amino_acids(array),\n (array.res_name == \"FK5\")])\n # Expect error when selectinf an atom multiple times\n with pytest.raises(ValueError):\n struc.remove_pbc(array, [struc.filter_amino_acids(array),\n (array.atom_name == \"CA\")])", "def prune(self,domains,constraint):\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n new_domains = deepcopy(domains)\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n new_domains[left_var[0]] = [right_val[0]]\n \n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n new_set = set(new_domains[left_var[0]]) & set(new_domains[right_var[0]])\n new_domains[left_var[0]] = list(new_set)\n new_domains[right_var[0]] = list(new_set)\n\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # print(l,r)\n # print(new_domains)\n # print(constraint)\n\n for var,mult in zip(left_var,left_const_mult):\n max_var = max(domains[var])\n comp = (r-(l-mult*max_var)) / mult\n for elem in domains[var]:\n if elem < comp:\n new_domains[var].remove(elem)\n\n for var,mult in zip(right_var,right_const_mult):\n min_var = min(domains[var])\n comp = (l-(r-mult*min_var)) / mult\n for elem in domains[var]:\n if elem > comp:\n new_domains[var].remove(elem)\n\n # for i,domain in enumerate(new_domains):\n # if len(domain) == 0:\n # print(i,l,r)\n # print(\"Old:\",domains)\n # print(\"New:\",new_domains)\n # print(domains)\n # print(constraint)\n # print(\"------------------------\")\n # raise SystemError(\"Domain is Empty!!\")\n\n return new_domains", "def prune_connections(net, subsample_indices):\n new_connections = []\n new_subsample_indices = []\n for i in range(len(subsample_indices)):\n if len(subsample_indices[i]) > 0:\n new_connections.append(net.connections[i])\n new_subsample_indices.append(subsample_indices[i])\n\n net.connections = new_connections\n return new_subsample_indices", "def filter_by_subset(self, *args):\n self.subset_labels = sorted(set(self.subset_labels + list(args)))\n return self", "def conj(self, vecs):\n raise NotImplementedError", "def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)", "def clearResonancePeakDimContribs(resonance,peaks=None):\n\n if not peaks:\n peaks = []\n\n peakDict = {}\n for peak in peaks:\n peakDict[peak] = True\n \n peakDims = {} \n for contrib in resonance.peakDimContribs:\n peakDim = contrib.peakDim\n \n if (not peakDict) or peakDict.get(peakDim.peak):\n peakDims[peakDim] = True\n peakContribs = 
contrib.peakContribs\n contrib.delete()\n \n for peakContrib in peakContribs:\n if not peakContrib.peakDimContribs:\n peakContrib.delete()", "def remove_cond(self, idx):\n model = self._model\n model.beginRemoveRows(ROOT_MIDX, idx, idx)\n del self._conds[idx]\n model.endRemoveRows()\n self.notify_wizard()", "def prune_conformers(self, param={'M':'cml1', 'rp':1.0,'thresh':0.25,'wz':F,'sort':T}):\n if param['M'] in ['rmsd']:\n ds = self.get_rmsd()\n elif param['M'] in ['cm','cml1']:\n ds = self.get_dcm(param)\n else:\n raise '#ERROR: unknow rep'\n #print ' ++ ds = ', ds\n #print ' |__ es = ', np.array(self.es)\n seq = np.argsort(self.es) # sort by increasing energy\n ccids = []\n for i in seq:\n # always keep lowest-energy conformer\n if len(ccids) == 0:\n ccids.append(i)\n continue\n\n # discard conformers within the RMSD threshold\n if np.all(ds[i][ccids] >= thresh):\n ccids.append(i)\n self.nconf = len(ccids)\n # creat a new mol object with unique conformers\n new = Chem.Mol(self.mol)\n new.RemoveAllConformers()\n for i in ccids:\n ci = self.mol.GetConformer(i)\n new.AddConformer(ci, assignId=True)\n self.mol = new" ]
[ "0.83215594", "0.7152217", "0.7077424", "0.6343125", "0.609424", "0.5977655", "0.5930062", "0.5876391", "0.5876157", "0.57800704", "0.5628625", "0.5434072", "0.5406661", "0.5321781", "0.5312178", "0.52586085", "0.5258399", "0.52080315", "0.51944923", "0.5176773", "0.513047", "0.5128241", "0.5118952", "0.51003486", "0.50569355", "0.50362694", "0.50162774", "0.49914062", "0.49873352", "0.49860454" ]
0.8212127
1
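Illustrative usage sketch for the removecones record above (an assumption, not taken from the dataset row: it presumes the standard MOSEK Optimizer API for Python, where appendcone and removecones are Task methods and cone indices are passed as an integer list like the subset argument in the document field).

import mosek  # assumed dependency

with mosek.Env() as env, env.Task() as task:
    task.appendvars(6)
    task.appendcone(mosek.conetype.quad, 0.0, [0, 1, 2])   # x0 >= sqrt(x1^2 + x2^2)
    task.appendcone(mosek.conetype.quad, 0.0, [3, 4, 5])   # x3 >= sqrt(x4^2 + x5^2)
    task.removecones([0])                                  # drop the first cone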
Appends semidefinite variables to the problem. appendbarvars(self,dim_)
def appendbarvars(self,dim_): num_ = None if num_ is None: num_ = len(dim_) elif num_ != len(dim_): raise IndexError("Inconsistent length of array dim") if dim_ is None: raise ValueError("Argument dim cannot be None") if dim_ is None: raise ValueError("Argument dim may not be None") if isinstance(dim_, numpy.ndarray) and dim_.dtype is numpy.dtype(numpy.int32) and dim_.flags.contiguous: _dim_copyarray = False _dim_tmp = ctypes.cast(dim_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif dim_ is not None: _dim_copyarray = True _dim_np_tmp = numpy.zeros(len(dim_),numpy.dtype(numpy.int32)) _dim_np_tmp[:] = dim_ assert _dim_np_tmp.flags.contiguous _dim_tmp = ctypes.cast(_dim_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _dim_copyarray = False _dim_tmp = None res = __library__.MSK_XX_appendbarvars(self.__nativep,num_,_dim_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendbarvars(self,dim): # 3\n num_ = None\n if num_ is None:\n num_ = len(dim)\n elif num_ != len(dim):\n raise IndexError(\"Inconsistent length of array dim\")\n if num_ is None: num_ = 0\n if dim is None: raise TypeError(\"Invalid type for argument dim\")\n if dim is None:\n dim_ = None\n else:\n try:\n dim_ = memoryview(dim)\n except TypeError:\n try:\n _tmparr_dim = array.array(\"i\",dim)\n except TypeError:\n raise TypeError(\"Argument dim has wrong type\")\n else:\n dim_ = memoryview(_tmparr_dim)\n \n else:\n if dim_.format != \"i\":\n dim_ = memoryview(array.array(\"i\",dim))\n \n res = self.__obj.appendbarvars(num_,dim_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset): # 3\n num_ = None\n if num_ is None:\n num_ = len(subset)\n elif num_ != len(subset):\n raise IndexError(\"Inconsistent length of array subset\")\n if num_ is None: num_ = 0\n if subset is None: raise TypeError(\"Invalid type for argument subset\")\n if subset is None:\n subset_ = None\n else:\n try:\n subset_ = memoryview(subset)\n except TypeError:\n try:\n _tmparr_subset = array.array(\"i\",subset)\n except TypeError:\n raise TypeError(\"Argument subset has wrong type\")\n else:\n subset_ = memoryview(_tmparr_subset)\n \n else:\n if subset_.format != \"i\":\n subset_ = memoryview(array.array(\"i\",subset))\n \n res = self.__obj.removebarvars(num_,subset_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def removebarvars(self,subset_):\n num_ = None\n if num_ is None:\n num_ = len(subset_)\n elif num_ != len(subset_):\n raise IndexError(\"Inconsistent length of array subset\")\n if subset_ is None:\n raise ValueError(\"Argument subset cannot be None\")\n if subset_ is None:\n raise ValueError(\"Argument subset may not be None\")\n if isinstance(subset_, numpy.ndarray) and subset_.dtype is numpy.dtype(numpy.int32) and subset_.flags.contiguous:\n _subset_copyarray = False\n _subset_tmp = ctypes.cast(subset_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subset_ is not None:\n _subset_copyarray = True\n _subset_np_tmp = numpy.zeros(len(subset_),numpy.dtype(numpy.int32))\n _subset_np_tmp[:] = subset_\n assert _subset_np_tmp.flags.contiguous\n _subset_tmp = ctypes.cast(_subset_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subset_copyarray = False\n _subset_tmp = None\n \n res = __library__.MSK_XX_removebarvars(self.__nativep,num_,_subset_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def plotMultipleVars(self, vars, series, groups=None, labels=None, postfix=\"\",logy=True, fixedrange=False):\n # split the variable names, we'll use the first one for naming purposes\n varnames = [var.split(\"_\") for var in vars]\n\n # create the separate dataframes from the provided groups\n # Define some labels if we have groups and no provided labels\n # Stack all the variables we want to plot in one histogram\n dfs = None\n if groups:\n dfs = [series.loc[g,:].stack() for g in groups]\n if not labels or len(labels) != len(groups):\n labels = [\"Group %s\" % (i+1) for i in xrange(len(groups)-1)]\n labels.append(\"Bulk\")\n else:\n dfs = [series.stack()]\n\n\n # Get right number of colors, and reverse them so that mediumpurple is \n # used for the bulk of the chips (assumed to be the last group)\n colors = (self.colorlist[:len(dfs)])\n colors.reverse()\n \n # Make the histogram\n # Get the preferred binning and check whether all values 
fall within that range \n if varnames[0][0] in cutinfo11:\n nbins = cutinfo11[varnames[0][0]][2]\n xmin = cutinfo11[varnames[0][0]][3]\n xmax = cutinfo11[varnames[0][0]][4]\n series_min = series.min().min()\n series_max = series.max().max()\n if fixedrange or (series_min > xmin and series_max < xmax):\n ax = plt.hist(dfs, bins=nbins, range=[xmin, xmax], stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=nbins, stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=20, stacked=True, \n color=colors, label=labels, log=logy)\n\n # Set the axis titles\n if varnames[0][0] in cutinfo11:\n if len(varnames[0]) == 1:\n plt.xlabel(cutinfo11[varnames[0][0]][0], \n fontsize=self.labelsize)\n else:\n plt.xlabel(\"%s ; %s\" % (cutinfo11[varnames[0][0]][0], varnames[0][1]), \n fontsize=self.labelsize)\n else:\n plt.xlabel(varnames[0][0], \n fontsize=self.labelsize)\n plt.ylabel(\"Number of measurements\", fontsize=self.labelsize)\n\n # set margins and format axis labels\n x0, x1, y0, y1 = plt.axis()\n if logy:\n plt.axis((x0, x1,\n 0.5, y1*10))\n else:\n plt.axis((x0, x1,\n 0.5, y1*(1+0.2)))\n ax = plt.gca()\n ax.tick_params(labelsize=self.ticklabelsize)\n plt.gcf().subplots_adjust(bottom=0.12)\n\n # Add mean and std info\n # Only use info on good chips, should be the last group in the list\n mean = dfs[-1].mean() #series.stack().mean()\n std = dfs[-1].std() #series.stack().std()\n plt.figtext(0.4, 0.92,\n \"Mean: %.3g Std/Mean: %.3g\\nStd: %.3g\"%(mean, std/mean, std),\n fontsize=self.ticklabelsize)\n\n # Add cut lines if we have info\n if self.cutfile != None and varnames[0][0] in cutinfo11:\n plt.axvline(x=self.cuts[varnames[0][0]][2], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][3], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][0], linestyle='solid', linewidth=2, color='dimgrey')\n plt.axvline(x=self.cuts[varnames[0][0]][1], linestyle='solid', linewidth=2, color='dimgrey')\n\n # Add legend if we have labels\n if labels:\n plt.legend(loc='best', ncol=2)\n\n # Save figure\n plt.savefig(\"%s/%s%s.pdf\" % (self.outputdir, varnames[0][0], postfix))\n plt.clf()", "def plot_vars(\n self,\n vars,\n axes=None,\n bins=None,\n start=None,\n stop=None,\n edges=None,\n transform=None,\n ):\n if self._delayed_mode:\n for name, var in vars.items():\n if not compatible_partitions(var, self._masks[0]):\n raise IncompatiblePartitions(\"plot_vars\", var, self._masks[0])\n else:\n for name, var in vars.items():\n if len(var) != len(self._masks[0]):\n raise ValueError(\n f\"The variable '{name}' has length '{len(var)}', but the masks have length '{len(self._masks[0])}'\"\n )\n\n hists = []\n labels = [\"initial\"] + [f\"N - {i}\" for i in self._names] + [\"N\"]\n\n bins = [None] * len(vars) if bins is None else bins\n start = [None] * len(vars) if start is None else start\n stop = [None] * len(vars) if stop is None else stop\n edges = [None] * len(vars) if edges is None else edges\n transform = [None] * len(vars) if transform is None else transform\n\n if axes is not None:\n axes = axes\n else:\n axes = []\n for (name, var), b, s1, s2, e, t in zip(\n vars.items(), bins, start, stop, edges, transform\n ):\n ax = coffea.util._gethistogramaxis(\n name, var, b, s1, s2, e, t, self._delayed_mode\n )\n axes.append(ax)\n\n checklengths = [\n len(x) == len(vars) for x in (axes, bins, start, stop, edges, transform)\n ]\n if not all(checklengths):\n raise ValueError(\n 
\"vars, axes, bins, start, stop, edges, and transform must be the same length\"\n )\n\n if not self._delayed_mode:\n for (name, var), axis in zip(vars.items(), axes):\n h = hist.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"N-1\"),\n )\n arr = awkward.flatten(var)\n h.fill(arr, awkward.zeros_like(arr))\n for i, mask in enumerate(self.result().masks, 1):\n arr = awkward.flatten(var[mask])\n h.fill(arr, awkward.full_like(arr, i, dtype=int))\n hists.append(h)\n\n else:\n for (name, var), axis in zip(vars.items(), axes):\n h = hist.dask.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"N-1\"),\n )\n arr = dask_awkward.flatten(var)\n h.fill(arr, dask_awkward.zeros_like(arr))\n for i, mask in enumerate(self.result().masks, 1):\n arr = dask_awkward.flatten(var[mask])\n h.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n hists.append(h)\n\n return hists, labels", "def appendvars(self,num_): # 3\n res = self.__obj.appendvars(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendvars(self,num_):\n res = __library__.MSK_XX_appendvars(self.__nativep,num_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getdimbarvarj(self,j_): # 3\n res,resargs = self.__obj.getdimbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dimbarvarj_return_value = resargs\n return _dimbarvarj_return_value", "def plot_vars(\n self,\n vars,\n axes=None,\n bins=None,\n start=None,\n stop=None,\n edges=None,\n transform=None,\n ):\n if self._delayed_mode:\n for name, var in vars.items():\n if not compatible_partitions(var, self._masksonecut[0]):\n raise IncompatiblePartitions(\"plot_vars\", var, self._masksonecut[0])\n else:\n for name, var in vars.items():\n if len(var) != len(self._masksonecut[0]):\n raise ValueError(\n f\"The variable '{name}' has length '{len(var)}', but the masks have length '{len(self._masksonecut[0])}'\"\n )\n\n histsonecut, histscutflow = [], []\n labels = [\"initial\"] + list(self._names)\n\n bins = [None] * len(vars) if bins is None else bins\n start = [None] * len(vars) if start is None else start\n stop = [None] * len(vars) if stop is None else stop\n edges = [None] * len(vars) if edges is None else edges\n transform = [None] * len(vars) if transform is None else transform\n\n if axes is not None:\n axes = axes\n else:\n axes = []\n for (name, var), b, s1, s2, e, t in zip(\n vars.items(), bins, start, stop, edges, transform\n ):\n ax = coffea.util._gethistogramaxis(\n name, var, b, s1, s2, e, t, self._delayed_mode\n )\n axes.append(ax)\n\n checklengths = [\n len(x) == len(vars) for x in (axes, bins, start, stop, edges, transform)\n ]\n if not all(checklengths):\n raise ValueError(\n \"vars, axes, bins, start, stop, edges, and transform must be the same length\"\n )\n\n if not self._delayed_mode:\n for (name, var), axis in zip(vars.items(), axes):\n honecut = hist.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"onecut\"),\n )\n hcutflow = honecut.copy()\n hcutflow.axes.name = name, \"cutflow\"\n\n arr = awkward.flatten(var)\n honecut.fill(arr, awkward.zeros_like(arr))\n hcutflow.fill(arr, awkward.zeros_like(arr))\n\n for i, mask in enumerate(self.result().masksonecut, 1):\n arr = awkward.flatten(var[mask])\n honecut.fill(arr, awkward.full_like(arr, i, dtype=int))\n histsonecut.append(honecut)\n\n for i, mask in enumerate(self.result().maskscutflow, 1):\n arr = awkward.flatten(var[mask])\n hcutflow.fill(arr, awkward.full_like(arr, 
i, dtype=int))\n histscutflow.append(hcutflow)\n\n else:\n for (name, var), axis in zip(vars.items(), axes):\n honecut = hist.dask.Hist(\n axis,\n hist.axis.Integer(0, len(labels), name=\"onecut\"),\n )\n hcutflow = honecut.copy()\n hcutflow.axes.name = name, \"cutflow\"\n\n arr = dask_awkward.flatten(var)\n honecut.fill(arr, dask_awkward.zeros_like(arr))\n hcutflow.fill(arr, dask_awkward.zeros_like(arr))\n\n for i, mask in enumerate(self.result().masksonecut, 1):\n arr = dask_awkward.flatten(var[mask])\n honecut.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n histsonecut.append(honecut)\n\n for i, mask in enumerate(self.result().maskscutflow, 1):\n arr = dask_awkward.flatten(var[mask])\n hcutflow.fill(arr, dask_awkward.full_like(arr, i, dtype=int))\n histscutflow.append(hcutflow)\n\n return histsonecut, histscutflow, labels", "def add_variable(self, variable, bins, thresholds):\n # TODO: this will no longer work since 1st dimension is pileup\n if variable in self.keys():\n logger.warn('Variable {0} already exists!')\n return\n self._thresholds[variable] = thresholds\n hist_names = []\n add_name = hist_names.append\n\n for puBinLower, puBinUpper in pairwise(self._pileUpBins):\n for threshold in thresholds:\n name = '{0}_threshold_gt{1}_pu{2}To{3}'.format(\n variable, threshold, puBinLower, puBinUpper)\n if not self[puBinLower][variable][threshold]:\n add_name(name)\n self[puBinLower][variable][\n threshold] = _EfficiencyCurve(name, bins, threshold)\n logger.debug('Created {0} histograms: {1}'.format(\n len(hist_names), ', '.join(hist_names)))", "def var_imp(modelname, ind_i):\n with open('../output_files/features.p', 'rb') as fp:\n features = pickle.load(fp)\n\n path_load = '../output_files/importances_' + modelname + '.npy'\n importances = np.load(path_load)\n\n # df of importances\n d = {'features': features, 'importances': importances}\n imp_df = pd.DataFrame(d)\n imp_df = imp_df.sort_values('importances', ascending=False)\n imp_df = imp_df.reset_index(drop=True)\n\n plt.title('Feature Importances')\n plt.barh(range(ind_i), imp_df['importances'][:ind_i], color='b', align='center')\n plt.yticks(range(ind_i), [imp_df['features'][i] for i in range(ind_i)])\n plt.xlabel('Relative Importance')\n plt.show()", "def draw_variables(self): \n z = self.q[0].draw_variable_local(self.sims)\n for i in range(1,len(self.q)):\n z = np.vstack((z,self.q[i].draw_variable_local(self.sims)))\n return z", "def value_to_bar(self):\n\n for variable in self._energy_state:\n setattr(self, \"{}_bar\".format(variable), getattr(self, variable))\n\n self.E_s_sum = self.E_s\n self.melt_sum = self.melt\n self.swi_sum = self.swi", "def add_variables(self, variables, cardinality, inhibitor_probability):\n if len(variables) == 1:\n if not isinstance(inhibitor_probability[0], (list, tuple)):\n inhibitor_probability = [inhibitor_probability]\n\n if len(variables) != len(cardinality):\n raise ValueError(\"Size of variables and cardinality should be same\")\n elif any(\n cardinal != len(prob_array)\n for prob_array, cardinal in zip(inhibitor_probability, cardinality)\n ) or len(cardinality) != len(inhibitor_probability):\n raise ValueError(\n \"Size of variables and inhibitor_probability should be same\"\n )\n elif not all(\n 0 <= item <= 1 for item in chain.from_iterable(inhibitor_probability)\n ):\n raise ValueError(\n \"Probability values should be between 0 and 1(both inclusive).\"\n )\n else:\n self.variables = np.concatenate((self.variables, variables))\n self.cardinality = np.concatenate((self.cardinality, 
cardinality))\n self.inhibitor_probability.extend(inhibitor_probability)", "def putbarxj(self,whichsol_,j_,barxj_):\n _barxj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barxj_ is not None and len(barxj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barxj is not long enough: Is %d, expected %d\" % (len(barxj_),self.getlenbarvarj((j_))))\n if barxj_ is None:\n raise ValueError(\"Argument barxj cannot be None\")\n if barxj_ is None:\n raise ValueError(\"Argument barxj may not be None\")\n if isinstance(barxj_, numpy.ndarray) and barxj_.dtype is numpy.dtype(numpy.float64) and barxj_.flags.contiguous:\n _barxj_copyarray = False\n _barxj_tmp = ctypes.cast(barxj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barxj_ is not None:\n _barxj_copyarray = True\n _barxj_np_tmp = numpy.zeros(len(barxj_),numpy.dtype(numpy.float64))\n _barxj_np_tmp[:] = barxj_\n assert _barxj_np_tmp.flags.contiguous\n _barxj_tmp = ctypes.cast(_barxj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barxj_copyarray = False\n _barxj_tmp = None\n \n res = __library__.MSK_XX_putbarxj(self.__nativep,whichsol_,j_,_barxj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def bvar(b):\n return (b - np.nanmean(b, axis=1)[:, np.newaxis])**2", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _parse_var_modelspace(self,varname) :\n\n\t\tmodelspace = self.ss.constraint.modelspace\n\t\tparams = getattr(modelspace,varname)\n\t\tnvars = len(self.ss.variables) # num of variables\n\n\t\tif varname in ('alpha','beta') : \n\t\t\tkeys = params.keys()\n\t\t\tvar_range = (params['defaultLowerBound'],\\\n\t\t\t\tparams['defaultUpperBound'])\n\t\t\tself.modelspace[varname] = [var_range]*nvars\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+',key)\t:\n\t\t\t\t\tidx = int(key.split('_')[1])\t\t\t\t\n\t\t\t\t\tself.modelspace[varname][idx-1] = params[key]\n\n\t\telif varname in ('g','h') :\n\t\t\tkeys = params.keys()\n\t\t\tvar_range = (params['defaultLowerBound'],\\\n\t\t\t\tparams['defaultUpperBound'])\n\n\t\t\t# This step is purely there cuz [[var_range]*nvars]*nvars\n\t\t\t# does not work\n\t\t\tvarlist = []\n\t\t\tfor ii in range(nvars) : \n\t\t\t\tvarlist.append([var_range]*nvars)\n\t\t\tself.modelspace[varname] = varlist\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+_\\d+',key)\t:\n\t\t\t\t\tidr,idc = map(int,(key.split('_')[1:3]))\n\t\t\t\t\tself.modelspace[varname][idr-1][idc-1] = params[key]\n\t\t\n\t\telse :\n\t\t\tlogging.error(\"Unrecognized varname %s quitting..\" \\\n\t\t\t%(varname))\n\t\t\tsys.exit(1)", "def push(self, **vars):\n self._variable_stack.append(dict(self._variables))\n self.update(**vars)", "def putvarbound(self,j_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarbound(self.__nativep,j_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _varimp_plot(self, num_of_features=None, server=False, save_plot_path=None):\n assert_is_type(num_of_features, None, int)\n assert_is_type(server, bool)\n\n plt = get_matplotlib_pyplot(server)\n if plt is None:\n return decorate_plot_result(figure=RAISE_ON_FIGURE_ACCESS)\n\n # get the variable importances as a list of tuples, do not use pandas dataframe\n importances = self.varimp(use_pandas=False)\n # features labels correspond to the first value of each 
tuple in the importances list\n feature_labels = [tup[0] for tup in importances]\n # relative importances correspond to the first value of each tuple in the importances list\n scaled_importances = [tup[2] for tup in importances]\n # specify bar centers on the y axis, but flip the order so largest bar appears at top\n pos = range(len(feature_labels))[::-1]\n # specify the bar lengths\n val = scaled_importances\n\n # default to 10 or less features if num_of_features is not specified\n if num_of_features is None:\n num_of_features = min(len(val), 10)\n\n fig, ax = plt.subplots(1, 1, figsize=(14, 10))\n # create separate plot for the case where num_of_features == 1\n if num_of_features == 1:\n plt.barh(pos[0:num_of_features], val[0:num_of_features], align=\"center\",\n height=0.8, color=\"#1F77B4\", edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n ax.margins(None, 0.5)\n\n else:\n plt.barh(pos[0:num_of_features], val[0:num_of_features], align=\"center\",\n height=0.8, color=\"#1F77B4\", edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])\n # ax.margins(y=0.5)\n\n # check which algorithm was used to select right plot title\n plt.title(\"Variable Importance: H2O %s\" % self._model_json[\"algo_full_name\"], fontsize=20)\n if not server:\n plt.show()\n \n if save_plot_path is not None:\n plt.savefig(fname=save_plot_path)\n\n return decorate_plot_result(figure=plt.gcf())", "def addVars(self, *indexes, **kwargs):\n ...", "def add_extra_dim(self, params: ExtraBytesParams) -> None:\n self.add_extra_dims([params])", "def finalize(self, vark):\n mask1 = self.npairs != 0\n mask2 = self.npairs == 0\n\n self.xi[mask1] /= self.weight[mask1]\n self.meanlogr[mask1] /= self.weight[mask1]\n self.varxi[mask1] = vark / self.npairs[mask1]\n\n # Update the units of meanlogr\n self.meanlogr[mask1] -= self.log_sep_units\n\n # Use meanlogr when available, but set to nominal when no pairs in bin.\n self.meanlogr[mask2] = self.logr[mask2]\n self.varxi[mask2] = 0.", "def variational_expectations(self, Fmu, Fvar, Y):\n integrand = self.log_prob\n nghp = self.num_gauss_hermite_points\n return ndiagquad(integrand, nghp, Fmu, Fvar, Y=Y)", "def qbar(xlist):\n dislin.qplbar(xlist, len(xlist))", "def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n else:\n raise 
ValueError(\"plot number must be 0, 1\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})", "def putbarsj(self,whichsol_,j_,barsj_):\n _barsj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barsj_ is not None and len(barsj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barsj is not long enough: Is %d, expected %d\" % (len(barsj_),self.getlenbarvarj((j_))))\n if barsj_ is None:\n raise ValueError(\"Argument barsj cannot be None\")\n if barsj_ is None:\n raise ValueError(\"Argument barsj may not be None\")\n if isinstance(barsj_, numpy.ndarray) and barsj_.dtype is numpy.dtype(numpy.float64) and barsj_.flags.contiguous:\n _barsj_copyarray = False\n _barsj_tmp = ctypes.cast(barsj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barsj_ is not None:\n _barsj_copyarray = True\n _barsj_np_tmp = numpy.zeros(len(barsj_),numpy.dtype(numpy.float64))\n _barsj_np_tmp[:] = barsj_\n assert _barsj_np_tmp.flags.contiguous\n _barsj_tmp = ctypes.cast(_barsj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barsj_copyarray = False\n _barsj_tmp = None\n \n res = __library__.MSK_XX_putbarsj(self.__nativep,whichsol_,j_,_barsj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _analyseVariables(self):\n self.unused_vars = []\n ffis_limited = False\n\n highest_rank = -1\n best_var = None\n count = 0\n\n # Need to get highest ranked variable (most dimensions) so that we can work out FFI\n for var in self.vars:\n msg = f\"Analysing: {var.name}\"\n self.output_message.append(msg)\n count = count + 1\n\n # get rank\n rank = len(var.shape)\n\n # Deal with singleton variables\n if rank == 0: \n self.rank_zero_vars.append(var)\n self.rank_zero_var_ids.append(var.name)\n continue\n\n # Update highest if highest found or if equals highest with bigger size\n try:\n var.size = var.size()\n best_var.size = best_var.size()\n except:\n pass\n\n if rank > highest_rank or (rank == highest_rank and var.size > best_var.size):\n highest_rank = rank\n best_var = var\n best_var_index = count - 1\n\n # If all are zero ranked variables or no vars identified/found then we cannot write any to NASA Ames and return ([], [])\n if len(self.rank_zero_vars) == len(self.vars) or best_var is None: \n return ([], [])\n\n # Now start to sort the variables into main and auxiliary \n vars_for_na = [best_var]\n aux_vars_for_na = []\n shape = best_var.shape\n number_of_dims = len(shape)\n self.na_dict[\"NIV\"] = number_of_dims\n\n # If 2D 
then do a quick test to see if 2310 is feasible (i.e. uniformly spaced 2nd axis)\n if number_of_dims == 2:\n\n ffis_limited = [2010, 2110]\n axis = xarray_utils.get_coord_by_index(best_var, 1)\n\n if xarray_utils.isUniformlySpaced(axis):\n ffis_limited.append(2310)\n\n # Get the axes for the main variable being used\n best_var_axes = xarray_utils.getAxisList(best_var)\n \n # Get other variables into a list and analyse them\n rest_of_the_vars = self.vars[:best_var_index] + self.vars[(best_var_index + 1):]\n\n for var in rest_of_the_vars:\n\n if var.name in self.rank_zero_var_ids: continue\n\n # What to do with variables that have different number of dimensions or different shape\n if len(var.shape) != number_of_dims or var.shape != shape: \n # Could it be an auxiliary variable?\n if len(var.shape) != 1: \n self.unused_vars.append(var)\n continue\n\n first_axis = xarray_utils.get_coord_by_index(var, 0)\n # Check if axis is identical to first axis of main best variable, if so, can be auxiliary var\n if not xarray_utils.areAxesIdentical(best_var_axes[0], first_axis):\n\n # If not identical, then it might still qualify as an auxiliary every n time points - valid for 1020\n if len(var.shape) == 1:\n nvpm = xarray_utils.isAxisRegularlySpacedSubsetOf(first_axis, best_var_axes[0])\n\n # NVPM is the number of implied values which is equal to (len(ax2)/len(ax1))\n if nvpm:\n ffis_limited = [1020]\n self.na_dict[\"NVPM\"] = nvpm\n else: # if returned False, i.e. not regular subset axis\n self.unused_vars.append(var)\n\n else:\n self.unused_vars.append(var)\n continue\n\n else:\n # This could be used as a standard auxiliary variable\n if ffis_limited in ([1020],):\n # Already fixed on 1020 and cannot collect incompatible FFI vars so do not use\n self.unused_vars.append(var)\n else:\n aux_vars_for_na.append(var) \n\n else:\n this_var_axes = xarray_utils.getAxisList(var)\n\n # Loop through dimensions\n for i in range(number_of_dims): \n\n if not xarray_utils.areAxesIdentical(best_var_axes[i], this_var_axes[i]):\n self.unused_vars.append(var)\n break\n else:\n # OK, I think the current variable is compatible to write with the best variable along with a NASA Ames file \n vars_for_na.append(var)\n\n # Send vars_for_na AND aux_vars_for_na to a method to check if they have previously been mapped \n # from NASA Ames. In which case we'll write them back in the order they were initially read from the input file.\n (vars_for_na, aux_vars_for_na) = \\\n self._reorderVarsIfPreviouslyNA(vars_for_na, aux_vars_for_na)\n\n # Get the FFI\n self.na_dict[\"FFI\"] = \\\n self._decideFileFormatIndex(number_of_dims, aux_vars_for_na, ffis_limited)\n\n return vars_for_na, aux_vars_for_na", "def set_obs(self, num_obs):\n curr_obs = self._nobs\n if num_obs < curr_obs:\n raise ValueError(\"num_obs must be >= \" + str(curr_obs))\n if num_obs == curr_obs:\n return\n isstrvar = self._isstrvar\n empty_row = ['' if isstrvar(i) else MISSING for i in range(self._nvar)]\n self._varvals += [copy.copy(empty_row) \n for _ in range(num_obs - curr_obs)]\n self._nobs = num_obs\n self._changed = True\n # Need to clear srtlist. If there are string variables, there \n # might now be empty strings after non-empty string. If there \n # are numerical variables with extended missing, there will now \n # be \".\" missing after extended missing. Issue pointed out at\n # http://www.stata.com/statalist/archive/2013-08/msg00576.html\n self._srtlist = [None]*self._nvar" ]
[ "0.8159918", "0.55441445", "0.53684646", "0.533822", "0.5304417", "0.5274865", "0.51826805", "0.51651216", "0.5160413", "0.5143895", "0.5126579", "0.49842018", "0.49332213", "0.4922936", "0.48779106", "0.47955328", "0.47735283", "0.47632688", "0.47520563", "0.47460842", "0.474476", "0.4738471", "0.4728183", "0.4706734", "0.46899274", "0.46833193", "0.46790397", "0.46484372", "0.46444657", "0.46335393" ]
0.80840147
1
Appends a new conic constraint to the problem. appendcone(self,ct_,conepar_,submem_)
def appendcone(self,ct_,conepar_,submem_): nummem_ = None if nummem_ is None: nummem_ = len(submem_) elif nummem_ != len(submem_): raise IndexError("Inconsistent length of array submem") if submem_ is None: raise ValueError("Argument submem cannot be None") if submem_ is None: raise ValueError("Argument submem may not be None") if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous: _submem_copyarray = False _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif submem_ is not None: _submem_copyarray = True _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32)) _submem_np_tmp[:] = submem_ assert _submem_np_tmp.flags.contiguous _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _submem_copyarray = False _submem_tmp = None res = __library__.MSK_XX_appendcone(self.__nativep,ct_,conepar_,nummem_,_submem_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendcone(self,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.appendcone(ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconeseq(self,ct_,conepar_,nummem_,j_): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n res = self.__obj.appendconeseq(ct_,conepar_,nummem_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconeseq(self,ct_,conepar_,nummem_,j_):\n res = __library__.MSK_XX_appendconeseq(self.__nativep,ct_,conepar_,nummem_,j_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcone(self,k_,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.putcone(k_,ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcone(self,k_,ct_,conepar_,submem_):\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem_)\n elif nummem_ != len(submem_):\n raise IndexError(\"Inconsistent length of array submem\")\n if submem_ is None:\n raise ValueError(\"Argument submem cannot be None\")\n if submem_ is None:\n raise ValueError(\"Argument submem may not be None\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_putcone(self.__nativep,k_,ct_,conepar_,nummem_,_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconesseq(self,ct_,conepar_,nummem_,j_):\n num_ = None\n if num_ is None:\n num_ = len(ct_)\n elif num_ 
!= len(ct_):\n raise IndexError(\"Inconsistent length of array ct\")\n if num_ is None:\n num_ = len(conepar_)\n elif num_ != len(conepar_):\n raise IndexError(\"Inconsistent length of array conepar\")\n if num_ is None:\n num_ = len(nummem_)\n elif num_ != len(nummem_):\n raise IndexError(\"Inconsistent length of array nummem\")\n if ct_ is None:\n raise ValueError(\"Argument ct cannot be None\")\n if ct_ is None:\n raise ValueError(\"Argument ct may not be None\")\n if ct_ is not None:\n _ct_tmp = (ctypes.c_int32 * len(ct_))(*ct_)\n else:\n _ct_tmp = None\n if conepar_ is None:\n raise ValueError(\"Argument conepar cannot be None\")\n if conepar_ is None:\n raise ValueError(\"Argument conepar may not be None\")\n if isinstance(conepar_, numpy.ndarray) and conepar_.dtype is numpy.dtype(numpy.float64) and conepar_.flags.contiguous:\n _conepar_copyarray = False\n _conepar_tmp = ctypes.cast(conepar_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif conepar_ is not None:\n _conepar_copyarray = True\n _conepar_np_tmp = numpy.zeros(len(conepar_),numpy.dtype(numpy.float64))\n _conepar_np_tmp[:] = conepar_\n assert _conepar_np_tmp.flags.contiguous\n _conepar_tmp = ctypes.cast(_conepar_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _conepar_copyarray = False\n _conepar_tmp = None\n \n if nummem_ is None:\n raise ValueError(\"Argument nummem cannot be None\")\n if nummem_ is None:\n raise ValueError(\"Argument nummem may not be None\")\n if isinstance(nummem_, numpy.ndarray) and nummem_.dtype is numpy.dtype(numpy.int32) and nummem_.flags.contiguous:\n _nummem_copyarray = False\n _nummem_tmp = ctypes.cast(nummem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif nummem_ is not None:\n _nummem_copyarray = True\n _nummem_np_tmp = numpy.zeros(len(nummem_),numpy.dtype(numpy.int32))\n _nummem_np_tmp[:] = nummem_\n assert _nummem_np_tmp.flags.contiguous\n _nummem_tmp = ctypes.cast(_nummem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _nummem_copyarray = False\n _nummem_tmp = None\n \n res = __library__.MSK_XX_appendconesseq(self.__nativep,num_,_ct_tmp,_conepar_tmp,_nummem_tmp,j_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconesseq(self,ct,conepar,nummem,j_): # 3\n num_ = None\n if num_ is None:\n num_ = len(ct)\n elif num_ != len(ct):\n raise IndexError(\"Inconsistent length of array ct\")\n if num_ is None:\n num_ = len(conepar)\n elif num_ != len(conepar):\n raise IndexError(\"Inconsistent length of array conepar\")\n if num_ is None:\n num_ = len(nummem)\n elif num_ != len(nummem):\n raise IndexError(\"Inconsistent length of array nummem\")\n if num_ is None: num_ = 0\n if ct is None: raise TypeError(\"Invalid type for argument ct\")\n if ct is None:\n ct_ = None\n else:\n try:\n ct_ = memoryview(ct)\n except TypeError:\n try:\n _tmparr_ct = array.array(\"i\",ct)\n except TypeError:\n raise TypeError(\"Argument ct has wrong type\")\n else:\n ct_ = memoryview(_tmparr_ct)\n \n else:\n if ct_.format != \"i\":\n ct_ = memoryview(array.array(\"i\",ct))\n \n if conepar is None: raise TypeError(\"Invalid type for argument conepar\")\n if conepar is None:\n conepar_ = None\n else:\n try:\n conepar_ = memoryview(conepar)\n except TypeError:\n try:\n _tmparr_conepar = array.array(\"d\",conepar)\n except TypeError:\n raise TypeError(\"Argument conepar has wrong type\")\n else:\n conepar_ = memoryview(_tmparr_conepar)\n \n else:\n if conepar_.format != \"d\":\n conepar_ = 
memoryview(array.array(\"d\",conepar))\n \n if nummem is None: raise TypeError(\"Invalid type for argument nummem\")\n if nummem is None:\n nummem_ = None\n else:\n try:\n nummem_ = memoryview(nummem)\n except TypeError:\n try:\n _tmparr_nummem = array.array(\"i\",nummem)\n except TypeError:\n raise TypeError(\"Argument nummem has wrong type\")\n else:\n nummem_ = memoryview(_tmparr_nummem)\n \n else:\n if nummem_.format != \"i\":\n nummem_ = memoryview(array.array(\"i\",nummem))\n \n res = self.__obj.appendconesseq(num_,ct_,conepar_,nummem_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def con_ceq(x,project):\n \n cons = project.con_ceq(x)\n \n if cons: cons = array(cons)\n else: cons = zeros([0])\n \n return cons", "def append_construct(self, c):\n if self.array_index is not None:\n self.parent_item.construct.args[self.arg_index].insert(self.array_index + 1, c)\n else:\n raise ValueError(\"Invalid parent\")", "def getcone(self,k_,submem_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n _submem_minlength = self.getconeinfo((k_))[2]\n if self.getconeinfo((k_))[2] > 0 and submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem is not long enough: Is %d, expected %d\" % (len(submem_),self.getconeinfo((k_))[2]))\n if isinstance(submem_,numpy.ndarray) and not submem_.flags.writeable:\n raise ValueError(\"Argument submem must be writable\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_getcone(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_),_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n if _submem_copyarray:\n submem_[:] = _submem_np_tmp\n return 
(_ct_return_value,_conepar_return_value,_nummem_return_value)", "def set_conectividad(self, conec):\n self.add_conec_listoflists(conec) # calcula el ne y el je", "def constraint(self, c):\n self.add_constraint(c)", "def add_constraint(self, kind, hook, expr, queue=False,**kwargs):\n\n if isinstance(expr, GenericVariable):\n # make sure we actually pass the optlang variable\n expr = expr.variable\n\n # Initialisation links to the cobra_model\n cons = kind(hook, expr, # problem = self.problem,\n # lb=lower_bound if lower_bound != float('-inf') else None,\n # ub=upper_bound if upper_bound != float('inf') else None,\n queue=queue,\n **kwargs)\n self._cons_dict[cons.name] = cons\n self.logger.debug('Added constraint: {}'.format(cons.name))\n # self.add_cons_vars(cons.constraint)\n\n return cons", "def putconboundlistconst(self,sub_,bkc_,blc_,buc_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n res = __library__.MSK_XX_putconboundlistconst(self.__nativep,num_,_sub_tmp,bkc_,blc_,buc_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_compartment(self, Vp=1, Qp=1):\n self.__n_compartments += 1\n self.__compartments.append({'Vp': Vp, 'Qp': Qp})", "def addConstrs(self, constrs, name=''):\n ...", "def cone(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True, degree:\n Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio: Union[float,\n bool]=2.0, nodeState: Union[int, bool]=0, pivot: Union[List[float, float, float],\n bool]=None, radius: Union[float, bool]=1.0, sections: Union[int, bool]=8, spans:\n Union[int, bool]=1, startSweep: Union[float, bool]=0, tolerance: Union[float,\n bool]=0.01, useOldInitBehaviour: bool=False, useTolerance: bool=False,\n constructionHistory: bool=True, name: AnyStr=\"\", object: bool=True, polygon: int=0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def addCompartment(self, *args):\n return _libsbml.Model_addCompartment(self, *args)", "def putqcon(self,qcsubk_,qcsubi_,qcsubj_,qcval_):\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi_)\n elif numqcnz_ != len(qcsubi_):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj_)\n elif numqcnz_ != len(qcsubj_):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval_)\n elif numqcnz_ != len(qcval_):\n raise IndexError(\"Inconsistent length of array qcval\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk cannot be None\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk may not be None\")\n if isinstance(qcsubk_, numpy.ndarray) and qcsubk_.dtype is numpy.dtype(numpy.int32) and qcsubk_.flags.contiguous:\n 
_qcsubk_copyarray = False\n _qcsubk_tmp = ctypes.cast(qcsubk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubk_ is not None:\n _qcsubk_copyarray = True\n _qcsubk_np_tmp = numpy.zeros(len(qcsubk_),numpy.dtype(numpy.int32))\n _qcsubk_np_tmp[:] = qcsubk_\n assert _qcsubk_np_tmp.flags.contiguous\n _qcsubk_tmp = ctypes.cast(_qcsubk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubk_copyarray = False\n _qcsubk_tmp = None\n \n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi cannot be None\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj cannot be None\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n if qcval_ is None:\n raise ValueError(\"Argument qcval cannot be None\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n res = __library__.MSK_XX_putqcon(self.__nativep,numqcnz_,_qcsubk_tmp,_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def conc_after(V, C_e, Q, t, t_e):\n C = 0.21-(0.21-C_e)*math.e**-(abs(Q)/V*(t-t_e))\n return C", "def add_constraint(self, constraint, problem):\n problem += constraint", "def _AddCompound(self, kegg_id, how_many):\n i = self._FindCompoundIndex(kegg_id)\n if i is not None:\n self.reactants[i].coeff += how_many\n else:\n self.reactants += [CompoundWithCoeff.FromId(how_many, kegg_id)]\n \n # clear the cache since the reaction has changed\n self._catalyzing_enzymes = None", "def add(self, inp, pos):\n self.pos = pos\n self.para = list()\n # Call backend for dependency parsing.\n cabo = CabochaClient()\n cabo.add(self.proc.query(inp), self.pos)\n pool = [cabo.root]\n plist = 
[cabo.root]\n self.vlist = dict()\n # Use BFS to get a list of nodes.\n while pool:\n pid = pool.pop(0)\n for cid in cabo.childrenList[pid]:\n pool.append(cid)\n plist.insert(0, cid)\n # Add nodes using plist(from leaves to roots).\n for i in range(len(plist)):\n pid = plist[i]\n self._addChildren(pid, cabo.chunks)\n self._processPara()\n\n # Return here if self.autosub is False.\n if not self.autosub:\n return\n # If root has no subject, add omitted subject node.\n if self.G.nodes[cabo.chunks[cabo.root].main]['sub'] == '':\n omitted = CaboChunk(-1, cabo.root)\n omitted.main = \"省略される主体[{0}@{1}]\".format(self.pos, 0)\n omitted.func = \"(省略)\"\n omitted.type = 0\n omitted.pro = 7\n omitted.surface = \"省略される主体\"\n omitted.yomi = \"ショウリャクサレルシュゴ\"\n self._addNode(omitted)\n self._addEdge(omitted.main, cabo.chunks[cabo.root].main, label=\"(省略)主体\", etype=\"sub\")\n self.G.nodes[cabo.chunks[cabo.root].main]['sub'] = omitted.main\n # Add autosub\n for i in range(len(plist)):\n pid = plist[i]\n if cabo.chunks[pid].type in [1, 2] and self.G.nodes[cabo.chunks[pid].main]['sub']== \"\":\n self._addEdge(self.G.nodes[cabo.chunks[cabo.root].main]['sub'], cabo.chunks[pid].main, label=\"主体候補\", etype=\"autosub\")\n self.G.nodes[cabo.chunks[pid].main]['sub'] = self.G.nodes[cabo.chunks[cabo.root].main]['sub']", "def addnewaccl(zs,ze,ez=0.,ap=0.,ax=0.,ay=0.,ox=0.,oy=0.,xw=0.,sw=0.,\n et=0.,ts=0.,dt=0.,\n time=None,data=None,func=None):\n # --- Make sure that at least some of the element is in the proper range,\n # --- z >= 0., and if zlatperi != 0, z <= zlatperi.\n assert (zs < ze),\"element start must be less than element end\"\n assert (top.zlatperi == 0.) or (ze > 0.),\"element end must be greater than zero if top.zlatperi is nonzero\"\n assert (top.zlatperi == 0.) or (zs < top.zlatperi),\"element start must be less than zlatperi if top.zlatperi is nonzero\"\n\n # --- Get a dict of the input arguments and their values.\n ldict = locals()\n\n # --- Setup the lattice arrays for the insertion of the new element. If\n # --- there are already accls, then find the place where the new one is to\n # --- be inserted and shift the existing data to open up a space.\n # --- Note that this uses that same check as in resetlat, that zs != ze to\n # --- determine whether or not a accl is defined.\n ie = 0\n # --- Find which element the new one goes before.\n while (ie <= top.naccl and top.acclzs[ie] <= zs and\n top.acclzs[ie] != top.acclze[ie]):\n ie = ie + 1\n\n # --- Increase the size of the arrays if the element will go past the end\n # --- or if the array is full (i.e. 
the last element is used).\n if ie > top.naccl or top.acclzs[-1] != top.acclze[-1]:\n top.naccl = top.naccl + 100\n gchange(\"Lattice\")\n if isinstance(et,(ndarray,collections.Sequence)) and len(et)-1 > top.ntaccl:\n top.ntaccl = len(et) - 1\n gchange(\"Lattice\")\n\n # --- Setup dictionary relating lattice array with input argument names.\n # --- This is done here so that the references to the lattice arrays\n # --- refer to the updated memory locations after the gchange.\n edict={'zs':top.acclzs,'ze':top.acclze,'ez':top.acclez,\n 'ap':top.acclap,'ax':top.acclax,'ay':top.acclay,\n 'ox':top.acclox,'oy':top.accloy,'xw':top.acclxw,'sw':top.acclsw,\n 'et':top.acclet,'ts':top.acclts,'dt':top.accldt}\n\n # --- Shift the existing data in the arrays to open up a space for the\n # --- new element.\n if ie <= top.naccl:\n for e in edict.itervalues():\n if len(shape(e)) == 1:\n e[ie+1:] = e[ie:-1] + 0\n else:\n # --- acclet is 2-D\n e[:,ie+1:] = e[:,ie:-1] + 0\n\n # --- Insert the new element. Note that edict correlates the lattice array\n # --- with the input arguments and ldict correlate the arguements with\n # --- their values.\n for (xx,e) in edict.iteritems():\n if len(shape(e)) == 1:\n e[ie] = ldict[xx]\n else:\n # --- acclet is 2-D\n e[:,ie] = ldict[xx]\n\n # --- resetlat must be called before the data can be used\n top.lresetlat = true\n\n if (time is not None and data is not None) or func is not None:\n tdle = TimeDependentLatticeElement('acclez',ie,time,data,func)\n return ie,tdle\n\n return ie", "def appendcons(self,num_): # 3\n res = self.__obj.appendcons(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def conj(x, *args, **kwargs):\n raise NotImplementedError", "def add_corridor_constraint(self,seg,r,weight=1.0):\n\n constraint_type = \"cylinder\"\n params = dict()\n params['x1'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg],\n self.qr_polytraj.waypoints['y'][0,seg],\n self.qr_polytraj.waypoints['z'][0,seg]])\n params['x2'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg+1],\n self.qr_polytraj.waypoints['y'][0,seg+1],\n self.qr_polytraj.waypoints['z'][0,seg+1]])\n params['der'] = 0\n params['l'] = r # Give the same radius buffer on the end caps\n params['r'] = r\n params['weight'] = weight\n params['keep_out'] = False\n params['active_seg'] = seg\n\n\n self.qr_polytraj.add_constraint(constraint_type,params,dynamic_weighting=False,sum_func=False)", "def append_constraints(parent_constraints, new_constraints):\n new_con_dict = copy.deepcopy(parent_constraints)\n for con in new_constraints:\n new_con_dict[con[1]].append((con[0], con[2])) # Maps v -> (agent, time)\n return new_con_dict", "def convex_conj(self):\n convex_conjs = [func.convex_conj for func in self.functionals]\n return SeparableSum(*convex_conjs)" ]
[ "0.8244941", "0.77045983", "0.74703234", "0.74599016", "0.7063304", "0.6341248", "0.6041502", "0.5657737", "0.53261125", "0.5275094", "0.5257149", "0.5240593", "0.52378", "0.5133575", "0.51317424", "0.51154685", "0.51087177", "0.50969726", "0.5067879", "0.50591534", "0.5019988", "0.4972472", "0.4966355", "0.4958446", "0.49412066", "0.49196264", "0.48987722", "0.48906934", "0.4861032", "0.48325443" ]
0.8216878
1
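The record above pairs a docstring-style query with MOSEK's generated Python wrapper for Task.appendcone(ct, conepar, submem). For orientation, a minimal usage sketch follows. It is not part of the dataset row: it assumes the classic MOSEK Optimizer API for Python (releases where cones are appended explicitly rather than as affine conic constraints), and the environment/task setup, the conetype.quad and boundkey.fr enums, and the chosen variable indices are illustrative assumptions, not taken from this record.

    import mosek

    inf = 0.0  # placeholder; the bound value is ignored for free variables

    # Hypothetical model fragment: x0 >= sqrt(x1^2 + x2^2) as a quadratic cone.
    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.appendvars(3)                                   # variables x0, x1, x2
            for j in range(3):
                task.putvarbound(j, mosek.boundkey.fr, -inf, inf)
            # appendcone(ct, conepar, submem): conepar is unused for this cone
            # type and is conventionally passed as 0.0; submem lists the member
            # variable indices.
            task.appendcone(mosek.conetype.quad, 0.0, [0, 1, 2])

The related putcone call seen among the negatives replaces an existing cone at index k rather than appending a new one.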
Appends a new conic constraint to the problem. appendconeseq(self,ct_,conepar_,nummem_,j_)
def appendconeseq(self,ct_,conepar_,nummem_,j_): res = __library__.MSK_XX_appendconeseq(self.__nativep,ct_,conepar_,nummem_,j_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendconeseq(self,ct_,conepar_,nummem_,j_): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n res = self.__obj.appendconeseq(ct_,conepar_,nummem_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconesseq(self,ct_,conepar_,nummem_,j_):\n num_ = None\n if num_ is None:\n num_ = len(ct_)\n elif num_ != len(ct_):\n raise IndexError(\"Inconsistent length of array ct\")\n if num_ is None:\n num_ = len(conepar_)\n elif num_ != len(conepar_):\n raise IndexError(\"Inconsistent length of array conepar\")\n if num_ is None:\n num_ = len(nummem_)\n elif num_ != len(nummem_):\n raise IndexError(\"Inconsistent length of array nummem\")\n if ct_ is None:\n raise ValueError(\"Argument ct cannot be None\")\n if ct_ is None:\n raise ValueError(\"Argument ct may not be None\")\n if ct_ is not None:\n _ct_tmp = (ctypes.c_int32 * len(ct_))(*ct_)\n else:\n _ct_tmp = None\n if conepar_ is None:\n raise ValueError(\"Argument conepar cannot be None\")\n if conepar_ is None:\n raise ValueError(\"Argument conepar may not be None\")\n if isinstance(conepar_, numpy.ndarray) and conepar_.dtype is numpy.dtype(numpy.float64) and conepar_.flags.contiguous:\n _conepar_copyarray = False\n _conepar_tmp = ctypes.cast(conepar_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif conepar_ is not None:\n _conepar_copyarray = True\n _conepar_np_tmp = numpy.zeros(len(conepar_),numpy.dtype(numpy.float64))\n _conepar_np_tmp[:] = conepar_\n assert _conepar_np_tmp.flags.contiguous\n _conepar_tmp = ctypes.cast(_conepar_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _conepar_copyarray = False\n _conepar_tmp = None\n \n if nummem_ is None:\n raise ValueError(\"Argument nummem cannot be None\")\n if nummem_ is None:\n raise ValueError(\"Argument nummem may not be None\")\n if isinstance(nummem_, numpy.ndarray) and nummem_.dtype is numpy.dtype(numpy.int32) and nummem_.flags.contiguous:\n _nummem_copyarray = False\n _nummem_tmp = ctypes.cast(nummem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif nummem_ is not None:\n _nummem_copyarray = True\n _nummem_np_tmp = numpy.zeros(len(nummem_),numpy.dtype(numpy.int32))\n _nummem_np_tmp[:] = nummem_\n assert _nummem_np_tmp.flags.contiguous\n _nummem_tmp = ctypes.cast(_nummem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _nummem_copyarray = False\n _nummem_tmp = None\n \n res = __library__.MSK_XX_appendconesseq(self.__nativep,num_,_ct_tmp,_conepar_tmp,_nummem_tmp,j_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendcone(self,ct_,conepar_,submem_):\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem_)\n elif nummem_ != len(submem_):\n raise IndexError(\"Inconsistent length of array submem\")\n if submem_ is None:\n raise ValueError(\"Argument submem cannot be None\")\n if submem_ is None:\n raise ValueError(\"Argument submem may not be None\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n 
_submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_appendcone(self.__nativep,ct_,conepar_,nummem_,_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendcone(self,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.appendcone(ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def appendconesseq(self,ct,conepar,nummem,j_): # 3\n num_ = None\n if num_ is None:\n num_ = len(ct)\n elif num_ != len(ct):\n raise IndexError(\"Inconsistent length of array ct\")\n if num_ is None:\n num_ = len(conepar)\n elif num_ != len(conepar):\n raise IndexError(\"Inconsistent length of array conepar\")\n if num_ is None:\n num_ = len(nummem)\n elif num_ != len(nummem):\n raise IndexError(\"Inconsistent length of array nummem\")\n if num_ is None: num_ = 0\n if ct is None: raise TypeError(\"Invalid type for argument ct\")\n if ct is None:\n ct_ = None\n else:\n try:\n ct_ = memoryview(ct)\n except TypeError:\n try:\n _tmparr_ct = array.array(\"i\",ct)\n except TypeError:\n raise TypeError(\"Argument ct has wrong type\")\n else:\n ct_ = memoryview(_tmparr_ct)\n \n else:\n if ct_.format != \"i\":\n ct_ = memoryview(array.array(\"i\",ct))\n \n if conepar is None: raise TypeError(\"Invalid type for argument conepar\")\n if conepar is None:\n conepar_ = None\n else:\n try:\n conepar_ = memoryview(conepar)\n except TypeError:\n try:\n _tmparr_conepar = array.array(\"d\",conepar)\n except TypeError:\n raise TypeError(\"Argument conepar has wrong type\")\n else:\n conepar_ = memoryview(_tmparr_conepar)\n \n else:\n if conepar_.format != \"d\":\n conepar_ = memoryview(array.array(\"d\",conepar))\n \n if nummem is None: raise TypeError(\"Invalid type for argument nummem\")\n if nummem is None:\n nummem_ = None\n else:\n try:\n nummem_ = memoryview(nummem)\n except TypeError:\n try:\n _tmparr_nummem = array.array(\"i\",nummem)\n except TypeError:\n raise TypeError(\"Argument nummem has wrong type\")\n else:\n nummem_ = memoryview(_tmparr_nummem)\n \n else:\n if nummem_.format != \"i\":\n nummem_ = memoryview(array.array(\"i\",nummem))\n \n res = self.__obj.appendconesseq(num_,ct_,conepar_,nummem_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcone(self,k_,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n 
try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.putcone(k_,ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcone(self,k_,ct_,conepar_,submem_):\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem_)\n elif nummem_ != len(submem_):\n raise IndexError(\"Inconsistent length of array submem\")\n if submem_ is None:\n raise ValueError(\"Argument submem cannot be None\")\n if submem_ is None:\n raise ValueError(\"Argument submem may not be None\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_putcone(self.__nativep,k_,ct_,conepar_,nummem_,_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def append_construct(self, c):\n if self.array_index is not None:\n self.parent_item.construct.args[self.arg_index].insert(self.array_index + 1, c)\n else:\n raise ValueError(\"Invalid parent\")", "def con_ceq(x,project):\n \n cons = project.con_ceq(x)\n \n if cons: cons = array(cons)\n else: cons = zeros([0])\n \n return cons", "def appendcons(self,num_):\n res = __library__.MSK_XX_appendcons(self.__nativep,num_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def conj(traj):\r\n return Trajectory(np.conj(traj.modes))", "def putqcon(self,qcsubk_,qcsubi_,qcsubj_,qcval_):\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi_)\n elif numqcnz_ != len(qcsubi_):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj_)\n elif numqcnz_ != len(qcsubj_):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval_)\n elif numqcnz_ != len(qcval_):\n raise IndexError(\"Inconsistent length of array qcval\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk cannot be None\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk may not be None\")\n if isinstance(qcsubk_, numpy.ndarray) and qcsubk_.dtype is numpy.dtype(numpy.int32) and qcsubk_.flags.contiguous:\n _qcsubk_copyarray = False\n _qcsubk_tmp = ctypes.cast(qcsubk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubk_ is not None:\n _qcsubk_copyarray = True\n _qcsubk_np_tmp = numpy.zeros(len(qcsubk_),numpy.dtype(numpy.int32))\n _qcsubk_np_tmp[:] = qcsubk_\n assert _qcsubk_np_tmp.flags.contiguous\n _qcsubk_tmp = ctypes.cast(_qcsubk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubk_copyarray = False\n _qcsubk_tmp = None\n \n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi cannot be None\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is 
numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj cannot be None\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n if qcval_ is None:\n raise ValueError(\"Argument qcval cannot be None\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n res = __library__.MSK_XX_putqcon(self.__nativep,numqcnz_,_qcsubk_tmp,_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _append_cx(self, i, j):\n\n if not 0 <= i < self.num_qubits or not 0 <= j < self.num_qubits:\n raise QiskitError(\"CX qubits are out of bounds.\")\n self.linear[j] = (self.linear[i] + self.linear[j]) % 2\n self.shift[j] = (self.shift[i] + self.shift[j]) % 2", "def set_conectividad(self, conec):\n self.add_conec_listoflists(conec) # calcula el ne y el je", "def appendcons(self,num_): # 3\n res = self.__obj.appendcons(num_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def attach_CA(self):\n n = self.pC\n self.A[n] = self._mps_CA(self.C, self.A[n])", "def constraint(self, c):\n self.add_constraint(c)", "def putqconk(self,k_,qcsubi_,qcsubj_,qcval_):\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi_)\n elif numqcnz_ != len(qcsubi_):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj_)\n elif numqcnz_ != len(qcsubj_):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval_)\n elif numqcnz_ != len(qcval_):\n raise IndexError(\"Inconsistent length of array qcval\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi cannot be None\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is 
numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj cannot be None\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n if qcval_ is None:\n raise ValueError(\"Argument qcval cannot be None\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n res = __library__.MSK_XX_putqconk(self.__nativep,k_,numqcnz_,_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def conj(q):\n q = np.array([q[0]])\n q[0,1]=-q[0,1]\n q[0,2]=-q[0,2]\n q[0,3]=-q[0,3]\n complexconjugate = quatreal(q)\n return complexconjugate", "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def add(self, inp, pos):\n self.pos = pos\n self.para = list()\n # Call backend for dependency parsing.\n cabo = CabochaClient()\n cabo.add(self.proc.query(inp), self.pos)\n pool = [cabo.root]\n plist = [cabo.root]\n self.vlist = 
dict()\n # Use BFS to get a list of nodes.\n while pool:\n pid = pool.pop(0)\n for cid in cabo.childrenList[pid]:\n pool.append(cid)\n plist.insert(0, cid)\n # Add nodes using plist(from leaves to roots).\n for i in range(len(plist)):\n pid = plist[i]\n self._addChildren(pid, cabo.chunks)\n self._processPara()\n\n # Return here if self.autosub is False.\n if not self.autosub:\n return\n # If root has no subject, add omitted subject node.\n if self.G.nodes[cabo.chunks[cabo.root].main]['sub'] == '':\n omitted = CaboChunk(-1, cabo.root)\n omitted.main = \"省略される主体[{0}@{1}]\".format(self.pos, 0)\n omitted.func = \"(省略)\"\n omitted.type = 0\n omitted.pro = 7\n omitted.surface = \"省略される主体\"\n omitted.yomi = \"ショウリャクサレルシュゴ\"\n self._addNode(omitted)\n self._addEdge(omitted.main, cabo.chunks[cabo.root].main, label=\"(省略)主体\", etype=\"sub\")\n self.G.nodes[cabo.chunks[cabo.root].main]['sub'] = omitted.main\n # Add autosub\n for i in range(len(plist)):\n pid = plist[i]\n if cabo.chunks[pid].type in [1, 2] and self.G.nodes[cabo.chunks[pid].main]['sub']== \"\":\n self._addEdge(self.G.nodes[cabo.chunks[cabo.root].main]['sub'], cabo.chunks[pid].main, label=\"主体候補\", etype=\"autosub\")\n self.G.nodes[cabo.chunks[pid].main]['sub'] = self.G.nodes[cabo.chunks[cabo.root].main]['sub']", "def c_code_contiguous(self, node, name, inp, out, sub):\r\n raise theano.gof.utils.MethodNotDefined()", "def addConstrs(self, constrs, name=''):\n ...", "def addnewaccl(zs,ze,ez=0.,ap=0.,ax=0.,ay=0.,ox=0.,oy=0.,xw=0.,sw=0.,\n et=0.,ts=0.,dt=0.,\n time=None,data=None,func=None):\n # --- Make sure that at least some of the element is in the proper range,\n # --- z >= 0., and if zlatperi != 0, z <= zlatperi.\n assert (zs < ze),\"element start must be less than element end\"\n assert (top.zlatperi == 0.) or (ze > 0.),\"element end must be greater than zero if top.zlatperi is nonzero\"\n assert (top.zlatperi == 0.) or (zs < top.zlatperi),\"element start must be less than zlatperi if top.zlatperi is nonzero\"\n\n # --- Get a dict of the input arguments and their values.\n ldict = locals()\n\n # --- Setup the lattice arrays for the insertion of the new element. If\n # --- there are already accls, then find the place where the new one is to\n # --- be inserted and shift the existing data to open up a space.\n # --- Note that this uses that same check as in resetlat, that zs != ze to\n # --- determine whether or not a accl is defined.\n ie = 0\n # --- Find which element the new one goes before.\n while (ie <= top.naccl and top.acclzs[ie] <= zs and\n top.acclzs[ie] != top.acclze[ie]):\n ie = ie + 1\n\n # --- Increase the size of the arrays if the element will go past the end\n # --- or if the array is full (i.e. 
the last element is used).\n if ie > top.naccl or top.acclzs[-1] != top.acclze[-1]:\n top.naccl = top.naccl + 100\n gchange(\"Lattice\")\n if isinstance(et,(ndarray,collections.Sequence)) and len(et)-1 > top.ntaccl:\n top.ntaccl = len(et) - 1\n gchange(\"Lattice\")\n\n # --- Setup dictionary relating lattice array with input argument names.\n # --- This is done here so that the references to the lattice arrays\n # --- refer to the updated memory locations after the gchange.\n edict={'zs':top.acclzs,'ze':top.acclze,'ez':top.acclez,\n 'ap':top.acclap,'ax':top.acclax,'ay':top.acclay,\n 'ox':top.acclox,'oy':top.accloy,'xw':top.acclxw,'sw':top.acclsw,\n 'et':top.acclet,'ts':top.acclts,'dt':top.accldt}\n\n # --- Shift the existing data in the arrays to open up a space for the\n # --- new element.\n if ie <= top.naccl:\n for e in edict.itervalues():\n if len(shape(e)) == 1:\n e[ie+1:] = e[ie:-1] + 0\n else:\n # --- acclet is 2-D\n e[:,ie+1:] = e[:,ie:-1] + 0\n\n # --- Insert the new element. Note that edict correlates the lattice array\n # --- with the input arguments and ldict correlate the arguements with\n # --- their values.\n for (xx,e) in edict.iteritems():\n if len(shape(e)) == 1:\n e[ie] = ldict[xx]\n else:\n # --- acclet is 2-D\n e[:,ie] = ldict[xx]\n\n # --- resetlat must be called before the data can be used\n top.lresetlat = true\n\n if (time is not None and data is not None) or func is not None:\n tdle = TimeDependentLatticeElement('acclez',ie,time,data,func)\n return ie,tdle\n\n return ie", "def conc_after(V, C_e, Q, t, t_e):\n C = 0.21-(0.21-C_e)*math.e**-(abs(Q)/V*(t-t_e))\n return C", "def Ev_ccs(ccs_coord, ccs_span, vacancy_index, **kwargs):\n if 'QMInp' not in kwargs:\n qtk.exit(\"kwargs: 'QMInp' is missing.\\n\"\\\n + \"It should be set to QMInp object of \"\\\n + \"system without vacancies.\\n\"\\\n + \"It is necessary for inp settings\")\n base_inp = kwargs['QMInp']\n\n qm_setting = {}\n if 'qm_setting' in kwargs:\n qm_setting = kwargs['qm_setting']\n\n if 'pref' in kwargs and 'vref' in kwargs:\n alchem = True\n perfect_ref = kwargs['pref']\n vacancy_ref = kwargs['vref']\n elif 'pref' not in kwargs and 'vref' not in kwargs:\n alchem = False\n\n freeE = qtk.QMOut('freeAtom/freeAtom.out')\n freeE.inUnit('ev')\n\n if 'threads' in kwargs:\n _threads = kwargs['threads']\n else:\n _threads = 1\n\n inp_wov = qtk.QMInp(ccs_span.generate(**ccs_coord))\n inp_wv = qtk.QMInp(ccs_span.generate(**ccs_coord))\n\n inp_wv.removeAtoms(vacancy_index)\n inp_wv.setChargeMultiplicity(0, 2)\n\n perfect = 'ev_perfect' + str(os.getpid())\n vacancy = 'ev_vacancy' + str(os.getpid())\n perfectinp = perfect + '.inp'\n vacancyinp = vacancy + '.inp'\n inp_wov.molecule.name = perfectinp\n inp_wv.molecule.name = vacancyinp\n\n if os.path.exists(perfect):\n shutil.rmtree(perfect)\n if os.path.exists(vacancy):\n shutil.rmtree(vacancy)\n\n print ccs_coord\n if alchem:\n out_wov = qtk.Al1st(inp_wov, ref_dir=perfect_ref, **qm_setting)\n out_wv = qtk.Al1st(inp_wv, ref_dir=vacancy_ref, **qm_setting)\n else:\n out_wov = inp_wov.run(**qm_setting)\n out_wv = inp_wv.run(**qm_setting)\n try:\n os.remove(perfectinp)\n os.remove(vacancyinp)\n except OSError:\n shutil.rmtree(perfectinp)\n shutil.rmtree(vacancyinp)\n\n out_wov.inUnit('ev')\n out_wv.inUnit('ev')\n\n final = out_wov - out_wv - freeE\n\n msg = str(out_wov.Et) + '-(' + str(out_wv.Et) + \\\n '+' + str(freeE.Et) + ') = ' + str(final.Et)\n qtk.report('trial Ev', msg)\n\n return final.Et", "def conj(self):\n return np.conj(self)", "def 
match_contract_to_charter_constraints(contract, charter, charter_constraints, charity_constraints):\n\n r_quotes = []\n r_vector = []\n\n quote_slice = slice(0, 17)\n\n if 'subj' not in contract.sections:\n raise ValueError(\"contract has no subject section\")\n\n subj = contract.sections['subj'].body\n print(subj.untokenize_cc())\n print('------')\n if subj.embeddings is None:\n print(\"Subj embeddings are gone, restoring...\")\n subj.embeddings = contract.embeddings[subj.start:subj.end]\n # subj.tokens = doc.tokens[subj.start:subj.end]\n # subj.tokens_cc = doc.tokens_cc[subj.start:subj.end]\n # subj.embedd( GLOBALS__['CharterAnlysingContext'].pattern_factory )\n print('\\t\\t sample:', subj.embeddings[0][1:10])\n\n for head_type in charter_constraints:\n\n ##charity:\n if head_type in charity_constraints:\n print(f'{head_type} has charity constrinats')\n \n charity_constraints_by_head = charity_constraints[head_type]\n charity_constraints_by_head_new = []\n \n charity_constraints['new.'+head_type] = charity_constraints_by_head_new\n \n for i in range(len(charity_constraints_by_head)):\n _tuple = charity_constraints_by_head[i] \n# for cc in charity_constraints[head_type]:\n _slice = _tuple[0]\n emb_charter = charter.sections[head_type].body.embeddings[_slice]\n \n distance = 1 - DF(emb_charter, subj.embeddings[5:])\n \n# cc.add['subj_correlation'] = distance\n \n# detupling\n charity_constraints_by_head_new.append ( {\n 'slice':_slice,\n 'subj_correlation': distance,\n 'confidence': _tuple[1],\n 'sum': _tuple[2]\n })\n \n print('\\t'*4, 'cc=', charity_constraints_by_head_new[i])\n \n # print('\\t\\t---CC', cc[0])\n \n\n # GLOBALS__['CharterAnlysingContext'].doc.sections['head.directors'].body.embeddings[_slice]\n\n ##------------------------charity end\n print(f'measuring {head_type} constraints...'.upper())\n cc = charter_constraints[head_type]\n quotes = cc['sentences']\n for quote in quotes:\n print()\n _q = untokenize(quote['subdoc'].tokens_cc[quote_slice])\n print(_q)\n\n distance = 1 - DF(quote['subdoc'].embeddings[quote_slice],\n subj.embeddings[5:])\n\n quote['subj_correlation'] = distance\n\n print(f'distance = {distance:.4f}')\n\n r_quotes.append(_q)\n r_vector.append(distance)\n r_quotes.append('\\n')\n r_vector.append(distance)\n\n GLOBALS__['renderer'].render_color_text(r_quotes, r_vector)\n print(r_vector)\n print(r_quotes)", "def placeConcentrationInto(self, region, conc):\n self._sim.distributeConcentration(self, region, conc)\n return self", "def conj(self):\n out = empty((self._size, *self.shape[1:]), self.dtype)\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n np.conjugate(self[self._begin:], out[:k])\n np.conjugate(self[:self._end], out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n np.conjugate(part, out)\n\n return(out.view(ndarray))" ]
[ "0.86378384", "0.75416327", "0.7458773", "0.7380015", "0.73679143", "0.646462", "0.6225315", "0.51709515", "0.5064763", "0.48974988", "0.48840624", "0.48815274", "0.48780966", "0.4864251", "0.48559266", "0.47979966", "0.4756608", "0.4728465", "0.47282267", "0.4699578", "0.46717963", "0.4667326", "0.46460378", "0.46260604", "0.4619928", "0.46079183", "0.4607213", "0.46067777", "0.45833543", "0.45797583" ]
0.86945903
0
Changes the bounds for one constraint. chgconbound(self,i_,lower_,finite_,value_)
def chgconbound(self,i_,lower_,finite_,value_): res = __library__.MSK_XX_chgconbound(self.__nativep,i_,lower_,finite_,value_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chgconbound(self,i_,lower_,finite_,value_): # 3\n res = self.__obj.chgconbound(i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def chgbound(self,accmode_,i_,lower_,finite_,value_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res = self.__obj.chgbound(accmode_,i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def chgvarbound(self,j_,lower_,finite_,value_): # 3\n res = self.__obj.chgvarbound(j_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def chgvarbound(self,j_,lower_,finite_,value_):\n res = __library__.MSK_XX_chgvarbound(self.__nativep,j_,lower_,finite_,value_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconbound(self,i_,bkc_,blc_,buc_):\n res = __library__.MSK_XX_putconbound(self.__nativep,i_,bkc_,blc_,buc_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _manage_infinity_bounds(problem, _bounds, get_bound, set_bound):\n for variable in problem.variables:\n expr_bounds = get_bound(variable)\n lower_bound = expr_bounds.lower_bound\n upper_bound = expr_bounds.upper_bound\n\n if is_inf(lower_bound):\n new_lower_bound = None\n else:\n new_lower_bound = lower_bound\n\n if is_inf(upper_bound):\n new_upper_bound = None\n else:\n new_upper_bound = upper_bound\n\n set_bound(variable, Interval(new_lower_bound, new_upper_bound))", "def _initialize_bounds(problem, bounds, get_bound, set_bound):\n for constraint in problem.constraints:\n root_expr = constraint.root_expr\n expr_bounds = Interval(constraint.lower_bound, constraint.upper_bound)\n if root_expr not in bounds:\n set_bound(root_expr, expr_bounds)\n else:\n existing_bounds = get_bound(root_expr)\n new_bounds = existing_bounds.intersect(expr_bounds)\n set_bound(root_expr, new_bounds)", "def RestrictionRangeBound(self, compsIdList, lowerBound, upperBound):\n for i in range(len(compsIdList)): compsIdList[i] -= 1\n if self.solverTypeOptimize:\n self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) >= lowerBound)\n else:\n self.solver.assert_and_track(\n PbGe(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n lowerBound), \"LabelRangeBound: \" + str(self.labelIdx))\n self.labelIdx += 1\n if self.solverTypeOptimize:\n PbLe(self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n upperBound))\n else:\n self.solver.assert_and_track(\n sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) <= upperBound, \"LabelRangeBound: \" + str(self.labelIdx))\n self.labelIdx += 1", "def putconbound(self,i_,bk_,bl_,bu_): # 3\n if not isinstance(bk_,boundkey): raise TypeError(\"Argument bk has wrong type\")\n res = self.__obj.putconbound(i_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getconbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getconbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n 
_bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)", "def set_concentration_boundaries(self, boundary_marker, concentration_function):\n self.concentration_boundaries[boundary_marker] = concentration_function", "def SetPRBinCatConstraint( self, model ) :\n tot = np.dot( self.wish.T, self.dispo )\n for val in tot :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def update_i_bnds(self):\n\n # Get old and new boundaries.\n i_bnds_old = self.i_bounds\n i_bnds_new = self._get_i_bnds()\n\n for i_order in range(self.n_orders):\n\n # Take most restrictive lower bound.\n low_bnds = [i_bnds_new[i_order][0], i_bnds_old[i_order][0]]\n i_bnds_new[i_order][0] = np.max(low_bnds)\n\n # Take most restrictive upper bound.\n up_bnds = [i_bnds_new[i_order][1], i_bnds_old[i_order][1]]\n i_bnds_new[i_order][1] = np.min(up_bnds)\n\n # Update attribute.\n self.i_bounds = i_bnds_new\n\n return", "def set_bounds(self, new_bounds):\n\n # Update the internal object stored dict\n self.pbounds.update(new_bounds)\n\n # Loop through the all bounds and reset the min-max bound matrix\n for row, key in enumerate(self.pbounds.keys()):\n\n # Reset all entries, even if the same.\n self.bounds[row] = self.pbounds[key]", "def _process_individual_bound(self, val):\n if(val not in [True, False]):\n raise ValueError('For composition bounds expected are iether True' \n '(free function) or False (fixed function) not %s' % (str(val)))\n return val", "def apply_bounds(self, column_name, lower_bound=-np.inf,\n upper_bound=np.inf):\n self.check_for_column(column_name)\n\n if lower_bound is None:\n lower_bound = -np.inf\n if upper_bound is None:\n upper_bound = np.inf\n column = self.data[column_name]\n self.data[column_name] = column.clip(lower_bound, upper_bound)", "def set_constraint(self, g, g_min, g_max):\n self.g += g\n self.g_min += g_min\n self.g_max += g_max", "def _set_constraint(self):\n pass", "def putconboundsliceconst(self,first_,last_,bkc_,blc_,buc_):\n res = __library__.MSK_XX_putconboundsliceconst(self.__nativep,first_,last_,bkc_,blc_,buc_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def SetPRBinConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for val in tot :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def getconbound(self,i_): # 3\n res,resargs = self.__obj.getconbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def bounds(self, new_bounds: devices.PrimaryBounds) -> None:\n self._assert_bounds_are_valid(new_bounds)\n self._bounds = list(new_bounds)", "def _onSetParameterLower(self, value):\n self._parameters['lower'] = min(value, self._parameters['upper']) # Limit at upper\n self._logger.info(\"Parameter 'lower' of function '{}' changed to {}\".format(self._function, value))\n 
self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)", "def SetPRCatConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for line in tot :\n for val in line :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def update_upper_bounds(self, B):\n for arc in self.arcs():\n if self.arc_info[arc[0]]['upper_bound'] == -1:\n self.arc_info[arc[0]]['upper_bound'] = B", "def constraint(self, c):\n self.add_constraint(c)", "def set_bounds(\n self: A,\n lower: BoundValue = None,\n upper: BoundValue = None,\n method: str = \"clipping\",\n full_range_sampling: bool = False,\n a_min: BoundValue = None,\n a_max: BoundValue = None,\n ) -> A: # TODO improve description of methods\n lower, upper = _a_min_max_deprecation(**locals())\n bounds = tuple(a if isinstance(a, np.ndarray) or a is None else np.array([a], dtype=float) for a in (lower, upper))\n both_bounds = all(b is not None for b in bounds)\n # preliminary checks\n if self.bound_transform is not None:\n raise RuntimeError(\"A bounding method has already been set\")\n if full_range_sampling and not both_bounds:\n raise ValueError(\"Cannot use full range sampling if both bounds are not set\")\n checker = BoundChecker(*bounds)\n if not checker(self.value):\n raise ValueError(\"Current value is not within bounds, please update it first\")\n if not (lower is None or upper is None):\n if (bounds[0] >= bounds[1]).any(): # type: ignore\n raise ValueError(f\"Lower bounds {lower} should be strictly smaller than upper bounds {upper}\")\n # update instance\n transforms = dict(clipping=trans.Clipping, arctan=trans.ArctanBound, tanh=trans.TanhBound)\n if method in transforms:\n if self.exponent is not None and method != \"clipping\":\n raise ValueError(f'Cannot use method \"{method}\" in logarithmic mode')\n self.bound_transform = transforms[method](*bounds)\n elif method == \"constraint\":\n self.register_cheap_constraint(checker)\n else:\n raise ValueError(f\"Unknown method {method}\")\n self.bounds = bounds # type: ignore\n self.full_range_sampling = full_range_sampling\n # warn if sigma is too large for range\n if both_bounds and method != \"tanh\": # tanh goes to infinity anyway\n std_bounds = tuple(self._to_reduced_space(b) for b in self.bounds) # type: ignore\n min_dist = np.min(np.abs(std_bounds[0] - std_bounds[1]).ravel())\n if min_dist < 3.0:\n warnings.warn(f\"Bounds are {min_dist} sigma away from each other at the closest, \"\n \"you should aim for at least 3 for better quality.\")\n return self", "def fixC(self,i,value):\n if self.coeffPattern[2] == None:\n m,n=self.m,self.n\n self.coeffPattern[2] = [None]*m\n self.coeffPattern[2][i]=value\n self._updateEstimatorSize(i)" ]
[ "0.9179256", "0.7916872", "0.7376792", "0.7195413", "0.6016417", "0.6004763", "0.5817389", "0.5724509", "0.5707932", "0.56736284", "0.5642968", "0.5613745", "0.5549648", "0.5543372", "0.55391896", "0.5501184", "0.5480884", "0.5466385", "0.5455747", "0.54376006", "0.543686", "0.5429047", "0.53966075", "0.5349046", "0.5345001", "0.5336194", "0.531211", "0.53000784", "0.52504855", "0.5246634" ]
0.8795833
1
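The row above documents Task.chgconbound from the MOSEK Optimizer API. A minimal usage sketch follows; it assumes the standard mosek Python package (mosek.Env / env.Task) and Task.appendvars, which belongs to the same API but does not appear in this excerpt, and the reading of the lower/finite flags (lower selects which bound to change, finite == 0 makes that bound infinite) is likewise an assumption about the usual MOSEK convention rather than something stated in the row.

    import mosek

    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.appendcons(1)   # constraint index 0
            task.appendvars(1)   # variable index 0 (assumed API call, not shown in this dump)
            # chgconbound(i, lower, finite, value):
            #   i      - index of the constraint whose bound is changed
            #   lower  - nonzero: change the lower bound; zero: change the upper bound
            #   finite - nonzero: 'value' is the new finite bound; zero: that bound becomes infinite
            #   value  - the new bound (ignored when finite == 0)
            task.chgconbound(0, 1, 1, 2.5)   # lower bound of constraint 0 := 2.5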
Changes the bounds for one variable. chgvarbound(self,j_,lower_,finite_,value_)
def chgvarbound(self,j_,lower_,finite_,value_): res = __library__.MSK_XX_chgvarbound(self.__nativep,j_,lower_,finite_,value_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chgvarbound(self,j_,lower_,finite_,value_): # 3\n res = self.__obj.chgvarbound(j_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def chgconbound(self,i_,lower_,finite_,value_): # 3\n res = self.__obj.chgconbound(i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarbound(self,j_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarbound(self.__nativep,j_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarbound(self,j_,bk_,bl_,bu_): # 3\n if not isinstance(bk_,boundkey): raise TypeError(\"Argument bk has wrong type\")\n res = self.__obj.putvarbound(j_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def chgbound(self,accmode_,i_,lower_,finite_,value_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res = self.__obj.chgbound(accmode_,i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _manage_infinity_bounds(problem, _bounds, get_bound, set_bound):\n for variable in problem.variables:\n expr_bounds = get_bound(variable)\n lower_bound = expr_bounds.lower_bound\n upper_bound = expr_bounds.upper_bound\n\n if is_inf(lower_bound):\n new_lower_bound = None\n else:\n new_lower_bound = lower_bound\n\n if is_inf(upper_bound):\n new_upper_bound = None\n else:\n new_upper_bound = upper_bound\n\n set_bound(variable, Interval(new_lower_bound, new_upper_bound))", "def chgconbound(self,i_,lower_,finite_,value_):\n res = __library__.MSK_XX_chgconbound(self.__nativep,i_,lower_,finite_,value_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)", "def set_bounds(self, new_bounds):\n\n # Update the internal object stored dict\n self.pbounds.update(new_bounds)\n\n # Loop through the all bounds and reset the min-max bound matrix\n for row, key in enumerate(self.pbounds.keys()):\n\n # Reset all entries, even if the same.\n self.bounds[row] = self.pbounds[key]", "def getvarbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getvarbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n _bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)", "def putvarboundsliceconst(self,first_,last_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarboundsliceconst(self.__nativep,first_,last_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def set_bounds_atom(self,bounds):\n assert bounds.shape == (2,self.Phi.d)\n self.bounds = bounds # data bounds\n self.bounds_atom = bounds.T.tolist()\n for i in range(self.Phi.d): # bounds for the variance in each dimension\n max_variance_this_dimension = (bounds[1][i]-bounds[0][i])**2\n self.bounds_atom.append([self.variance_relative_lowerbound*max_variance_this_dimension,\n self.variance_relative_upperbound*max_variance_this_dimension])", "def getvarbound(self,i_): # 3\n res,resargs = 
self.__obj.getvarbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def test_basic_handling_range_with_variable_bounds(spark_ctx):\n\n dr = Drudge(spark_ctx)\n\n j1, j2 = symbols('j1 j2')\n m1, m2 = symbols('m1, m2')\n j_max = symbols('j_max')\n j = Range('j', 0, j_max)\n m = Range('m')\n dr.set_dumms(j, [j1, j2])\n dr.set_dumms(m, [m1, m2])\n\n v = Vec('v')\n x = IndexedBase('x')\n tensor = dr.sum((j2, j), (m2, m[0, j2]), x[j2, m2] * v[j2, m2])\n\n reset = tensor.reset_dumms()\n assert reset.n_terms == 1\n term = reset.local_terms[0]\n assert len(term.sums) == 2\n if term.sums[0][1].label == 'j':\n j_sum, m_sum = term.sums\n else:\n m_sum, j_sum = term.sums\n assert j_sum[0] == j1\n assert j_sum[1].args == j.args\n assert m_sum[0] == m1\n assert m_sum[1].label == 'm'\n assert m_sum[1].lower == 0\n assert m_sum[1].upper == j1 # Important!\n assert term.amp == x[j1, m1]\n assert term.vecs == (v[j1, m1],)\n\n # Test that functions can be mapped to the bounds.\n repled = reset.map2scalars(\n lambda x: x.xreplace({j_max: 10}), skip_ranges=False\n )\n assert repled.n_terms == 1\n term = repled.local_terms[0]\n checked = False\n for _, i in term.sums:\n if i.label == 'j':\n assert i.lower == 0\n assert i.upper == 10\n checked = True\n continue\n assert checked", "def set_bounds_atom(self,bounds):\n self.bounds = bounds # data bounds\n raise NotImplementedError\n self.bounds_atom = None\n return None", "def set_bounds_atom(self,bounds):\n assert bounds.shape == (2,self.Phi.d)\n self.bounds = bounds # data bounds\n self.bounds_atom = bounds.T.tolist()", "def set_in_bounds(self,obj,val):\n if not callable(val):\n bounded_val = self.crop_to_bounds(val)\n else:\n bounded_val = val\n super(Number,self).__set__(obj,bounded_val)", "def _initialize_bounds(problem, bounds, get_bound, set_bound):\n for constraint in problem.constraints:\n root_expr = constraint.root_expr\n expr_bounds = Interval(constraint.lower_bound, constraint.upper_bound)\n if root_expr not in bounds:\n set_bound(root_expr, expr_bounds)\n else:\n existing_bounds = get_bound(root_expr)\n new_bounds = existing_bounds.intersect(expr_bounds)\n set_bound(root_expr, new_bounds)", "def extend_bounds(problem):\n\n num_vars = problem[\"num_vars\"]\n num_ff_vars = 2 ** find_smallest(num_vars)\n num_dummy_variables = num_ff_vars - num_vars\n\n bounds = list(problem[\"bounds\"])\n names = problem[\"names\"]\n if num_dummy_variables > 0:\n bounds.extend([[0, 1] for x in range(num_dummy_variables)])\n names.extend([\"dummy_\" + str(var) for var in range(num_dummy_variables)])\n problem[\"bounds\"] = bounds\n problem[\"names\"] = names\n problem[\"num_vars\"] = num_ff_vars\n\n return problem", "def change_state_bounds(self, state, new_ub, new_lb, slack, comp=None,\n node=None):\n # TODO Adapt method so you can change only one of the settings?\n # TODO Put None as default parameter value and detect if other value is supplied\n comp_obj = self.get_component(comp, node)\n\n 
comp_obj.params[state].change_upper_bound(new_ub)\n comp_obj.params[state].change_lower_bound(new_lb)\n comp_obj.params[state].change_slack(slack)", "def write_bounds(self):\n optimized_par_df = \\\n self.parameter_df.loc[self.parameter_df.estimate == 1\n & (~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())), :]\n self.f.require_dataset('/parameters/lowerBound',\n shape=optimized_par_df.lowerBound.shape,\n data=optimized_par_df.lowerBound, dtype='f8')\n self.f.require_dataset('/parameters/upperBound',\n shape=optimized_par_df.upperBound.shape,\n data=optimized_par_df.upperBound, dtype='f8')", "def putvarboundlistconst(self,sub_,bkx_,blx_,bux_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n res = __library__.MSK_XX_putvarboundlistconst(self.__nativep,num_,_sub_tmp,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def incBound(self):\n # increment the bound for the local variables.\n self.incVariableList()\n super().incBound()\n\n # get the last variable.\n idx = len(self.stateNormalPath) - 1\n assert(idx > 0)\n\n # we add the constraints that specify the id of the transition\n self.addConstraintOnIdTransition(idx)", "def atvar(a,limits=None,inclusive=(1,1)):\r\n a = a.astype(N.float_)\r\n if limits == None or limits == [None,None]:\r\n return avar(a)\r\n assert type(limits) in [ListType,TupleType,N.ndarray], \"Wrong type for limits in atvar\"\r\n if inclusive[0]: lowerfcn = N.greater_equal\r\n else: lowerfcn = N.greater\r\n if inclusive[1]: upperfcn = N.less_equal\r\n else: upperfcn = N.less\r\n if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):\r\n raise ValueError, \"No array values within given limits (atvar).\"\r\n elif limits[0]==None and limits[1]<>None:\r\n mask = upperfcn(a,limits[1])\r\n elif limits[0]<>None and limits[1]==None:\r\n mask = lowerfcn(a,limits[0])\r\n elif limits[0]<>None and limits[1]<>None:\r\n mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])\r\n\r\n a = N.compress(mask,a) # squish out excluded values\r\n return avar(a)", "def _onSetParameterBIgnoreBounds(self, value):\n self._parameters['b'] = value\n self._logger.info(\"Parameter 'b' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def _process_individual_bound(self, val):\n if(val not in [True, False]):\n raise ValueError('For composition bounds expected are iether True' \n '(free function) or False (fixed function) not %s' % (str(val)))\n return val", "def _onSetParameterB(self, value):\n self._parameters['b'] = min(max(value, self._parameters['lower']), self._parameters['upper']) # Limit at upper and lower\n self._logger.info(\"Parameter ba' of 
function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def _onSetParameterUpper(self, value):\n self._parameters['upper'] = max(value, self._parameters['lower']) # Limit at lower\n self._logger.info(\"Parameter 'upper' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def update(self, function_values, es, bounds=None):\r\n if bounds is None:\r\n bounds = self.bounds\r\n if bounds is None or (bounds[0] is None and bounds[1] is None): # no bounds ==> no penalty\r\n return self # len(function_values) * [0.0] # case without voilations\r\n\r\n N = es.N\r\n ### prepare\r\n # compute varis = sigma**2 * C_ii\r\n varis = es.sigma**2 * array(N * [es.C] if np.isscalar(es.C) else ( # scalar case\r\n es.C if np.isscalar(es.C[0]) else # diagonal matrix case\r\n [es.C[i][i] for i in xrange(N)])) # full matrix case\r\n\r\n # dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5\r\n dmean = (es.mean - es.gp.geno(es.gp.into_bounds(es.gp.pheno(es.mean)))) / varis**0.5\r\n\r\n ### Store/update a history of delta fitness value\r\n fvals = sorted(function_values)\r\n l = 1 + len(fvals)\r\n val = fvals[3*l // 4] - fvals[l // 4] # exact interquartile range apart interpolation\r\n val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration\r\n # insert val in history\r\n if np.isfinite(val) and val > 0:\r\n self.hist.insert(0, val)\r\n elif val == inf and len(self.hist) > 1:\r\n self.hist.insert(0, max(self.hist))\r\n else:\r\n pass # ignore 0 or nan values\r\n if len(self.hist) > 20 + (3*N) / es.popsize:\r\n self.hist.pop()\r\n\r\n ### prepare\r\n dfit = np.median(self.hist) # median interquartile range\r\n damp = min(1, es.sp.mueff/10./N)\r\n\r\n ### set/update weights\r\n # Throw initialization error\r\n if len(self.hist) == 0:\r\n raise _Error('wrongful initialization, no feasible solution sampled. ' +\r\n 'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +\r\n 'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). 
')\r\n # initialize weights\r\n if (dmean.any() and (not self.weights_initialized or es.countiter == 2)): # TODO\r\n self.gamma = array(N * [2*dfit])\r\n self.weights_initialized = True\r\n # update weights gamma\r\n if self.weights_initialized:\r\n edist = array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))\r\n if 1 < 3: # this is better, around a factor of two\r\n # increase single weights possibly with a faster rate than they can decrease\r\n # value unit of edst is std dev, 3==random walk of 9 steps\r\n self.gamma *= exp((edist>0) * np.tanh(edist/3) / 2.)**damp\r\n # decrease all weights up to the same level to avoid single extremely small weights\r\n # use a constant factor for pseudo-keeping invariance\r\n self.gamma[self.gamma > 5 * dfit] *= exp(-1./3)**damp\r\n # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)\r\n elif 1 < 3 and (edist>0).any(): # previous method\r\n # CAVE: min was max in TEC 2009\r\n self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)\r\n # max fails on cigtab(N=12,bounds=[0.1,None]):\r\n # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?\r\n # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)\r\n else: # alternative version, but not better\r\n solutions = es.pop # this has not been checked\r\n r = self.feasible_ratio(solutions) # has to be the averaged over N iterations\r\n self.gamma *= exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)\r\n es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]\r\n ### return penalty\r\n # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]\r\n return self # bound penalty values\r" ]
[ "0.92657965", "0.6645805", "0.66179705", "0.63044393", "0.6285839", "0.6257946", "0.62345994", "0.61521727", "0.57639956", "0.56784153", "0.5577765", "0.5573649", "0.54211104", "0.5403837", "0.53660214", "0.52223015", "0.52110463", "0.5164623", "0.51575583", "0.51482254", "0.51427233", "0.513306", "0.5117852", "0.51147217", "0.511384", "0.5095138", "0.50924027", "0.50913715", "0.5057961", "0.50448924" ]
0.8982534
1
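The variable-bound analogue in the row above works the same way. Continuing the hypothetical task from the previous sketch, and again assuming the usual flag semantics, passing finite == 0 drops a bound entirely:

    # chgvarbound(j, lower, finite, value): same flags as chgconbound, applied to variable j
    task.chgvarbound(0, 0, 0, 0.0)   # upper bound of variable 0 becomes +infinity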
Obtains a single coefficient in the linear constraint matrix. getaij(self,i_,j_)
def getaij(self,i_,j_): aij_ = ctypes.c_double() res = __library__.MSK_XX_getaij(self.__nativep,i_,j_,ctypes.byref(aij_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) aij_ = aij_.value _aij_return_value = aij_ return (_aij_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getaij(self,i_,j_): # 3\n res,resargs = self.__obj.getaij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _aij_return_value = resargs\n return _aij_return_value", "def getqobjij(self,i_,j_): # 3\n res,resargs = self.__obj.getqobjij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _qoij_return_value = resargs\n return _qoij_return_value", "def _get_jacobian(self):\n srcs, recs = self.srcs, self.recs\n if not self.sparse:\n jac = numpy.array(\n [ttime2d.straight([cell], '', srcs, recs, velocity=1.)\n for cell in self.mesh]).T\n else:\n shoot = ttime2d.straight\n nonzero = []\n extend = nonzero.extend\n for j, c in enumerate(self.mesh):\n extend((i, j, tt)\n for i, tt in enumerate(shoot([c], '', srcs, recs,\n velocity=1.))\n if tt != 0)\n row, col, val = numpy.array(nonzero).T\n shape = (self.ndata, self.nparams)\n jac = scipy.sparse.csr_matrix((val, (row, col)), shape)\n return jac", "def getcj(self,j_):\n cj_ = ctypes.c_double()\n res = __library__.MSK_XX_getcj(self.__nativep,j_,ctypes.byref(cj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n cj_ = cj_.value\n _cj_return_value = cj_\n return (_cj_return_value)", "def getitem(self, i, j):\n # XXX: flint matrices do not support negative indices\n # XXX: They also raise ValueError instead of IndexError\n m, n = self.shape\n if i < 0:\n i += m\n if j < 0:\n j += n\n try:\n return self.rep[i, j]\n except ValueError:\n raise IndexError(f\"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}\")", "def getcj(self,j_): # 3\n res,resargs = self.__obj.getcj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _cj_return_value = resargs\n return _cj_return_value", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def getqobjij(self,i_,j_):\n qoij_ = ctypes.c_double()\n res = __library__.MSK_XX_getqobjij(self.__nativep,i_,j_,ctypes.byref(qoij_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n qoij_ = qoij_.value\n _qoij_return_value = qoij_\n return (_qoij_return_value)", "def jmat(ind: int):\n return _jm[ind - 1]", "def jac(self):\n return self.unit_jac if self._jac is None else self._jac", "def a_ij(s, p, i=1, j=1): # (Validated)\n from math import sqrt\n if i == j:\n return s.c[i]['a'] # Return pure paramater\n else: # find mixture aij i =/= j\n return (1 - p.m['k'][i][j]) * sqrt(s.c[i]['a'] * s.c[j]['a'])", "def __get_p_i_j():\n try:\n lambdas_r = self.lambdas_r\n lambdas_c = self.lambdas_c\n r_indexer = self.r_indexer\n c_indexer = self.c_indexer\n value = self.data.adjacencymat[i, j]\n sum_value = (lambdas_r[r_indexer[i]] + lambdas_c[c_indexer[j]])\n except AttributeError:\n self.lambdas_r, self.lambdas_c, self.r_indexer, self.c_indexer, _, _ = self.compute_lambdas_in_a_cooler_way(**kwargs)\n value = self.data.adjacencymat[i, j]\n sum_value = self.lambdas_r[self.r_indexer[i]] + self.lambdas_c[self.c_indexer[j]]\n except IndexError:\n raise\n return value, sum_value", "def __getslice__(self,i,j):\n nv=_Matr()\n nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def get_cell(self, i, j):\n return self._env[i][j]", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def GetJ(self, *args):\n return _table.Table_GetJ(self, *args)", "def c(self, i):\n value = self.b(i)\n if i == self.N:\n return value\n else:\n 
for j in range(i+1, self.N+1):\n value = value - self.a(i,j) * self.C[j]\n return value", "def getGradient(self,j):\n i = int(self.indicator['term'][j])\n r = int(self.indicator['row'][j])\n c = int(self.indicator['col'][j])\n rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])\n return rv", "def j_nc_from_j(self, j, inverse=False, check_bounds=False):\n if not inverse:\n if check_bounds:\n assert_between(j, 0, self._grid_shape[1]-1)\n j_nc = self._nc_j0 + j * self._nc_jskip\n if check_bounds:\n assert_between(j_nc, 0, self._nc_xdim)\n return j_nc\n else:\n j_nc = j\n if check_bounds:\n assert_between(j_nc, 0, self._nc_ydim)\n j = (j_nc - self._nc_j0)/self._nc_jskip\n if check_bounds:\n assert_between(j, 0, self._grid_shape[1]-1)\n return j", "def rule(model, i, j):\n return 1, model.T0_end[i, j] + (1-model.A[i, j]), None", "def get_elem (A, i, j):\n\treturn A[j][i]", "def c(self,j,i_j):\n \"\"\" The index j of the chains goes from 0 to k-1 (where k is the \n number of chains in our decomposition \"\"\"\n assert j < len(self.lcd), \"j must be the index of a chain\"\n \"\"\" The index i_j goes from 0 to len(lcd[j]) this range is one longer\n than the length of the chain because we go from {} to the full chain. \"\"\"\n assert i_j <= self.lcd_dims[j], \"i_j = {}, dims[j] = {}\".format(i_j, self.lcd_dims[j])\n if i_j == 0:\n return None\n else:\n return self.lcd[j][i_j-1]", "def acoeff(self):\n return np.dot(self.mmi,np.dot(self.mmatrix.T,self.bvec))", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def _jac_im_getter(self, component: str, surface_shape: tuple, periodic: bool, *im_args, **im_kwargs):\n\n inf_comp = self.influence_matrix(component, *im_args, **im_kwargs)[component]\n influence_martix_span = inf_comp.shape\n if periodic:\n # check that the surface shape is odd in both dimensions\n if not all([el % 2 for el in surface_shape]):\n raise ValueError(\"Surface shape must be odd in both dimensions for periodic surfaces\")\n # trim the influence matrix if necessary\n dif = [int((ims - ss) / 2) for ims, ss in zip(influence_martix_span, surface_shape)]\n if dif[0] > 0:\n inf_comp = inf_comp[dif[0]:-1 * dif[0], :]\n if dif[1] > 0:\n inf_comp = inf_comp[:, dif[1]:-1 * dif[1]]\n trimmed_ims = inf_comp.shape\n # pad to the same shape as the surface (this is why it has to be odd size)\n inf_mat = np.pad(inf_comp, ((0, surface_shape[0] - trimmed_ims[0]),\n (0, surface_shape[1] - trimmed_ims[1])), mode='constant')\n inf_mat = np.roll(inf_mat, (-1 * int(trimmed_ims[0] / 2), -1 * int(trimmed_ims[1] / 2)),\n axis=[0, 1]).flatten()\n jac_comp = []\n roll_num = 0\n # roll the influence matrix to fill in rows of the jacobian\n for n in range(surface_shape[0]):\n for m in range(surface_shape[1]):\n jac_comp.append(np.roll(inf_mat, roll_num))\n roll_num += 1\n jac_comp = np.asarray(jac_comp)\n\n else: # not periodic\n pad_0 = int(surface_shape[0] - np.floor(influence_martix_span[0] / 2))\n pad_1 = int(surface_shape[1] - np.floor(influence_martix_span[1] / 2))\n if pad_0 < 0:\n inf_comp = inf_comp[-1 * pad_0:pad_0, :]\n pad_0 = 0\n if pad_1 < 0:\n inf_comp = inf_comp[:, -1 * pad_1:pad_1]\n pad_1 = 0\n inf_mat = np.pad(inf_comp, ((pad_0, pad_0), (pad_1, pad_1)), mode='constant')\n jac_comp = []\n idx_0 = 0\n for n in range(surface_shape[0]):\n idx_1 = 0\n for m in range(surface_shape[1]):\n jac_comp.append(inf_mat[surface_shape[0] - idx_0:2 * surface_shape[0] - idx_0,\n surface_shape[1] - idx_1:2 * surface_shape[1] - idx_1].copy().flatten())\n idx_1 += 1\n idx_0 += 1\n jac_comp = 
np.asarray(jac_comp)\n\n return jac_comp", "def Read_IJMatrix(self, comm, fname):\n return _hypre.HypreParMatrix_Read_IJMatrix(self, comm, fname)", "def jacobian(self, x):\n pass", "def inverseIntermediateJac(self,x):\n \n Ri = self._rotation.T\n si = (1./self._scaled).reshape((1,1,self._dim))\n \n Jac = self.intermediateJacPol2Rot(x)\n \n #Ri.J\n Jac = np.einsum(\"jk,ikl->ijl\",Ri,Jac)\n #(Ri.J).diag(si)\n Jac *= si\n \n return Jac", "def jacobian(self, c):\n\n raise NotImplementedError", "def coefficients_from_j(j, minimal_twist=True):\n try:\n K = j.parent()\n except AttributeError:\n K = rings.RationalField()\n if K not in _Fields:\n K = K.fraction_field()\n\n char = K.characteristic()\n if char == 2:\n if j == 0:\n return Sequence([0, 0, 1, 0, 0], universe=K)\n else:\n return Sequence([1, 0, 0, 0, 1/j], universe=K)\n if char == 3:\n if j == 0:\n return Sequence([0, 0, 0, 1, 0], universe=K)\n else:\n return Sequence([0, j, 0, 0, -j**2], universe=K)\n\n if K is rings.RationalField():\n # we construct the minimal twist, i.e. the curve with minimal\n # conductor with this j_invariant:\n if j == 0:\n return Sequence([0, 0, 1, 0, 0], universe=K) # 27a3\n if j == 1728:\n return Sequence([0, 0, 0, -1, 0], universe=K) # 32a2\n\n if not minimal_twist:\n k = j-1728\n return Sequence([0, 0, 0, -3*j*k, -2*j*k**2], universe=K)\n\n n = j.numerator()\n m = n-1728*j.denominator()\n a4 = -3*n*m\n a6 = -2*n*m**2\n\n # Now E=[0,0,0,a4,a6] has j-invariant j=n/d\n from sage.sets.set import Set\n for p in Set(n.prime_divisors()+m.prime_divisors()):\n e = min(a4.valuation(p)//2, a6.valuation(p)//3)\n if e & gt\n 0:\n p = p**e\n a4 /= p**2\n a6 /= p**3\n\n # Now E=[0,0,0,a4,a6] is minimal at all p != 2,3\n tw = [-1, 2, -2, 3, -3, 6, -6]\n E1 = EllipticCurve([0, 0, 0, a4, a6])\n Elist = [E1] + [E1.quadratic_twist(t) for t in tw]\n Elist.sort(key=lambda E: E.conductor())\n return Sequence(Elist[0].ainvs())\n\n # defaults for all other fields:\n if j == 0:\n return Sequence([0, 0, 0, 0, 1], universe=K)\n if j == 1728:\n return Sequence([0, 0, 0, 1, 0], universe=K)\n k = j-1728\n return Sequence([0, 0, 0, -3*j*k, -2*j*k**2], universe=K)" ]
[ "0.7493986", "0.64376247", "0.6392298", "0.6329551", "0.6308506", "0.6269593", "0.6261952", "0.6139124", "0.61024755", "0.61000454", "0.60522926", "0.5945203", "0.5922942", "0.59190106", "0.5905818", "0.5891535", "0.5806236", "0.578032", "0.57758945", "0.5755988", "0.572439", "0.5722308", "0.57164514", "0.5714595", "0.56900984", "0.5689677", "0.5682416", "0.5678144", "0.56766146", "0.5655368" ]
0.7658511
0
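A coefficient written into the matrix can be read back with getaij from the row above. The sketch continues the same hypothetical task and uses putacol, whose wrapper signature (putacol(self,j_,subj,valj)) also appears in this dump:

    task.putacol(0, [0], [3.0])   # column 0: a(0,0) = 3.0
    aij = task.getaij(0, 0)       # -> 3.0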
Obtains the number of nonzeros in a rectangular piece of the linear constraint matrix. getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_)
def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_): numnz_ = ctypes.c_int32() res = __library__.MSK_XX_getapiecenumnz(self.__nativep,firsti_,lasti_,firstj_,lastj_,ctypes.byref(numnz_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) numnz_ = numnz_.value _numnz_return_value = numnz_ return (_numnz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_): # 3\n res,resargs = self.__obj.getapiecenumnz(firsti_,lasti_,firstj_,lastj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def getacolnumnz(self,i_):\n nzj_ = ctypes.c_int32()\n res = __library__.MSK_XX_getacolnumnz(self.__nativep,i_,ctypes.byref(nzj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzj_ = nzj_.value\n _nzj_return_value = nzj_\n return (_nzj_return_value)", "def getarownumnz(self,i_): # 3\n res,resargs = self.__obj.getarownumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n return _nzi_return_value", "def getacolslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getacolslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getarownumnz(self,i_):\n nzi_ = ctypes.c_int32()\n res = __library__.MSK_XX_getarownumnz(self.__nativep,i_,ctypes.byref(nzi_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzi_ = nzi_.value\n _nzi_return_value = nzi_\n return (_nzi_return_value)", "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getaslicenumnz(self,accmode_,first_,last_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res,resargs = self.__obj.getaslicenumnz64(accmode_,first_,last_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def nz(self):\n return self._dim[2]", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def nnz(self):", "def N_Col(self,val):\n\t\tif val in self.colums:\n\t\t\treturn self.colums.index(val)\n\t\telse:\n\t\t\treturn False", "def nnz(self):\n return self.rep.nnz()", "def innulo(self):\n for i in range(self.n):\n if not comozero(self[i]):\n return i\n return None", "def nnz(self):\n return self.to_ddm().nnz()", "def build_collocation(nr, nz):\n\n rootsr, _, Br, Wr = recur_colloc_symm(nr, 3)\n rootsz, Az, _ = recur_colloc(nz)\n \n return rootsz, Az, rootsr, Br, Wr", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136", "def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* 
\\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def zenith_nadir(x, y):\n if y == 'm':\n bb = []\n cc = []\n for i in range(x.shape[1]):\n bb.append(amax(x[:, i:i + 1]))\n b = array(bb)\n cc.append(amin(x[:, i:i + 1]))\n c = array(cc)\n return (b, c)\n else:\n b = ones(x.shape[1])\n c = zeros(x.shape[1])\n return (b, c)", "def test_get_date_column_index_first_col(self, one_row_worksheet):\n\n actual_result = one_row_worksheet.get_date_column_index()\n assert actual_result == 0", "def __column_height(self, x):\n\t\tcolumn = self.board[:, x]\n\t\treturn np.count_nonzero(column)", "def getnumbarcnz(self): # 3\n res,resargs = self.__obj.getnumbarcnz()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nz_return_value = resargs\n return _nz_return_value", "def first_zombie_col(self, row_num):\n row = self.board[row_num]\n for col_num, square in enumerate(row):\n if any(self.is_zombie([row_num, col_num])):\n return col_num", "def nnz(self):\n t = self.get_MSC()\n return len(np.unique(t['masks']))", "def nnz(self):\n\t\treturn self.st.size()", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col" ]
[ "0.81828666", "0.7616695", "0.75478375", "0.7345593", "0.7094305", "0.69862217", "0.6438003", "0.6430326", "0.61437994", "0.5996406", "0.57730126", "0.5638592", "0.55720025", "0.5547465", "0.5504438", "0.54538846", "0.54515296", "0.5447022", "0.5447022", "0.5447022", "0.5447022", "0.5428881", "0.53894436", "0.53851813", "0.53596854", "0.53377306", "0.5321408", "0.52835464", "0.5254005", "0.52440995" ]
0.79388636
1
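getapiecenumnz from the row above then reports how many structural nonzeros a rectangular block of that matrix holds. The sketch assumes MOSEK's usual half-open index ranges [first, last); that convention is an assumption here, not something stated in the row:

    # nonzeros of A restricted to rows [0, 1) and columns [0, 1)
    nnz_block = task.getapiecenumnz(0, 1, 0, 1)   # 1 for the task built above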
Obtains the number of nonzero elements in one column of the linear constraint matrix. getacolnumnz(self,i_)
def getacolnumnz(self,i_): nzj_ = ctypes.c_int32() res = __library__.MSK_XX_getacolnumnz(self.__nativep,i_,ctypes.byref(nzj_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) nzj_ = nzj_.value _nzj_return_value = nzj_ return (_nzj_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def getarownumnz(self,i_): # 3\n res,resargs = self.__obj.getarownumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n return _nzi_return_value", "def getarownumnz(self,i_):\n nzi_ = ctypes.c_int32()\n res = __library__.MSK_XX_getarownumnz(self.__nativep,i_,ctypes.byref(nzi_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzi_ = nzi_.value\n _nzi_return_value = nzi_\n return (_nzi_return_value)", "def colnum(self):\n \n colnum = 0\n for table in self.columnlabels:\n table = np.asarray(table)\n if np.ndim(table) <= 1:\n table = np.reshape(table, (1, -1))\n colnum += table.shape[1]\n return colnum", "def __column_height(self, x):\n\t\tcolumn = self.board[:, x]\n\t\treturn np.count_nonzero(column)", "def N_Col(self,val):\n\t\tif val in self.colums:\n\t\t\treturn self.colums.index(val)\n\t\telse:\n\t\t\treturn False", "def GetNumCols(self):\n return _hypre.HypreParMatrix_GetNumCols(self)", "def getNumCols(self):\n return self.__cols", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_): # 3\n res,resargs = self.__obj.getapiecenumnz(firsti_,lasti_,firstj_,lastj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def n_cols(self):\n ch = self.children\n return 1 if not ch else sum([c.n_cols for c in ch])", "def columnCount(self, parent_midx):\n return self._cols_nb", "def columnCount(self, parent_midx):\n return self._cols_nb", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col", "def getNbColumns(self):\n return self.data.shape[0]", "def ncolumns(self):\n return self.__ncols", "def num_cols(self):\n return (len(self.rows[0]))", "def columnCount(self):\n return abs(self.minCol) + abs(self.maxCol)", "def GetGlobalNumCols(self):\n return _hypre.HypreParMatrix_GetGlobalNumCols(self)", "def nz(self):\n return self._dim[2]", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_):\n numnz_ = ctypes.c_int32()\n res = __library__.MSK_XX_getapiecenumnz(self.__nativep,firsti_,lasti_,firstj_,lastj_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def GetNumberCols(self):\n return len(self.__colsKey)", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def GetColumnCount(self):\r\n\r\n return len(self._columns)", "def columnCount(self, parent):\r\n if len(self.arraydata) > 0:\r\n return len(self.arraydata[0]) - 2\r\n return 0", "def ncolumns(self):\n return len(self.__column_list)", "def nnz(self):\n return self.to_ddm().nnz()", "def num_cols(self):\n return len(self.rows[0])", "def columns(self):\n try:\n return self._data.shape[1] or 1\n except (AttributeError, IndexError):\n return 1" ]
[ "0.78955895", "0.6964569", "0.66872597", "0.6391909", "0.63877714", "0.632295", "0.63077927", "0.6225606", "0.6197037", "0.61326903", "0.61255556", "0.60932916", "0.60414433", "0.60414433", "0.5994617", "0.5992448", "0.59544677", "0.5947154", "0.5945124", "0.5940501", "0.5913796", "0.5907808", "0.5896922", "0.58867306", "0.5871345", "0.58662325", "0.5861006", "0.5858896", "0.58293295", "0.57936364" ]
0.7779385
1
Obtains one column of the linear constraint matrix. getacol(self,j_,subj_,valj_)
def getacol(self,j_,subj_,valj_): nzj_ = ctypes.c_int32() _subj_minlength = self.getacolnumnz((j_)) if self.getacolnumnz((j_)) > 0 and subj_ is not None and len(subj_) != self.getacolnumnz((j_)): raise ValueError("Array argument subj is not long enough: Is %d, expected %d" % (len(subj_),self.getacolnumnz((j_)))) if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable: raise ValueError("Argument subj must be writable") if subj_ is None: raise ValueError("Argument subj may not be None") if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous: _subj_copyarray = False _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subj_ is not None: _subj_copyarray = True _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32)) _subj_np_tmp[:] = subj_ assert _subj_np_tmp.flags.contiguous _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subj_copyarray = False _subj_tmp = None _valj_minlength = self.getacolnumnz((j_)) if self.getacolnumnz((j_)) > 0 and valj_ is not None and len(valj_) != self.getacolnumnz((j_)): raise ValueError("Array argument valj is not long enough: Is %d, expected %d" % (len(valj_),self.getacolnumnz((j_)))) if isinstance(valj_,numpy.ndarray) and not valj_.flags.writeable: raise ValueError("Argument valj must be writable") if valj_ is None: raise ValueError("Argument valj may not be None") if isinstance(valj_, numpy.ndarray) and valj_.dtype is numpy.dtype(numpy.float64) and valj_.flags.contiguous: _valj_copyarray = False _valj_tmp = ctypes.cast(valj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif valj_ is not None: _valj_copyarray = True _valj_np_tmp = numpy.zeros(len(valj_),numpy.dtype(numpy.float64)) _valj_np_tmp[:] = valj_ assert _valj_np_tmp.flags.contiguous _valj_tmp = ctypes.cast(_valj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _valj_copyarray = False _valj_tmp = None res = __library__.MSK_XX_getacol(self.__nativep,j_,ctypes.byref(nzj_),_subj_tmp,_valj_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) nzj_ = nzj_.value _nzj_return_value = nzj_ if _subj_copyarray: subj_[:] = _subj_np_tmp if _valj_copyarray: valj_[:] = _valj_np_tmp return (_nzj_return_value)
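For orientation, a minimal usage sketch of the getacol wrapper shown above. It assumes an already populated Task instance named task (not part of this row) and an illustrative column index; the output arrays must be writable, of the exact dtypes the wrapper checks for, and sized with getacolnumnz:

import numpy
j = 0                                          # illustrative column index
nzj = task.getacolnumnz(j)                     # nonzero count of column j
subj = numpy.zeros(nzj, dtype=numpy.int32)     # filled with row indices of the nonzeros
valj = numpy.zeros(nzj, dtype=numpy.float64)   # filled with coefficient values
nzj = task.getacol(j, subj, valj)              # writes into subj/valj, returns the count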
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getacol(self,j_,subj,valj): # 3\n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != self.getacolnumnz((j_)):\n raise ValueError(\"Array argument subj has wrong length\")\n if valj is None: raise TypeError(\"Invalid type for argument valj\")\n _copyback_valj = False\n if valj is None:\n valj_ = None\n else:\n try:\n valj_ = memoryview(valj)\n except TypeError:\n try:\n _tmparr_valj = array.array(\"d\",valj)\n except TypeError:\n raise TypeError(\"Argument valj has wrong type\")\n else:\n valj_ = memoryview(_tmparr_valj)\n _copyback_valj = True\n else:\n if valj_.format != \"d\":\n valj_ = memoryview(array.array(\"d\",valj))\n _copyback_valj = True\n if valj_ is not None and len(valj_) != self.getacolnumnz((j_)):\n raise ValueError(\"Array argument valj has wrong length\")\n res,resargs = self.__obj.getacol(j_,subj_,valj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n if _copyback_valj:\n valj[:] = _tmparr_valj\n if _copyback_subj:\n subj[:] = _tmparr_subj\n return _nzj_return_value", "def fast_get_col(self,j):\n col = self.col_view[:,j].copy()\n col.data = self.X.data[col.data]\n return col", "def sudoku_getcol(A, j):\r\n n = len(A)\r\n B = [0 for i in range(n)]\r\n for i in range(n):\r\n B[i] = A[i][j]\r\n return B", "def get_column(A: Matrix, j: int) -> Vector:\n return [A_i[j] for A_i in A]", "def get_column(A: Matrix, j: int) -> Vector:\n return [A_i[j] for A_i in A]", "def get_column(A: Matrix, j: int) -> Vector:\n return [A_i[j] for A_i in A]", "def get_column(A: Matrix, j: int) -> Vector:\n return [A_i[j] # jth element of row A_i\n for A_i in A] # for each row A_i", "def _get_col(self, idx):\n return self.line[self._fwf.column_slices[idx]]", "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def get_col(b, ci):\r\n return [b[0][ci], b[1][ci], b[2][ci]]", "def get_col(A,r=0):\n\treturn list(A[r])", "def col(self, i):\n return Vector([row[i] for row in self.data])", "def getacolnumnz(self,i_):\n nzj_ = ctypes.c_int32()\n res = __library__.MSK_XX_getacolnumnz(self.__nativep,i_,ctypes.byref(nzj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzj_ = nzj_.value\n _nzj_return_value = nzj_\n return (_nzj_return_value)", "def ColPart(self, *args):\n return _hypre.HypreParMatrix_ColPart(self, *args)", "def putacol(self,j_,subj,valj): # 3\n nzj_ = None\n if nzj_ is None:\n nzj_ = len(subj)\n elif nzj_ != len(subj):\n raise IndexError(\"Inconsistent length of array subj\")\n if nzj_ is None:\n nzj_ = len(valj)\n elif nzj_ != len(valj):\n raise IndexError(\"Inconsistent length of array valj\")\n if nzj_ is None: nzj_ = 0\n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n 
except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n \n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n \n if valj is None: raise TypeError(\"Invalid type for argument valj\")\n if valj is None:\n valj_ = None\n else:\n try:\n valj_ = memoryview(valj)\n except TypeError:\n try:\n _tmparr_valj = array.array(\"d\",valj)\n except TypeError:\n raise TypeError(\"Argument valj has wrong type\")\n else:\n valj_ = memoryview(_tmparr_valj)\n \n else:\n if valj_.format != \"d\":\n valj_ = memoryview(array.array(\"d\",valj))\n \n res = self.__obj.putacol(j_,nzj_,subj_,valj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putacol(self,j_,subj_,valj_):\n nzj_ = None\n if nzj_ is None:\n nzj_ = len(subj_)\n elif nzj_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if nzj_ is None:\n nzj_ = len(valj_)\n elif nzj_ != len(valj_):\n raise IndexError(\"Inconsistent length of array valj\")\n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if valj_ is None:\n raise ValueError(\"Argument valj cannot be None\")\n if valj_ is None:\n raise ValueError(\"Argument valj may not be None\")\n if isinstance(valj_, numpy.ndarray) and valj_.dtype is numpy.dtype(numpy.float64) and valj_.flags.contiguous:\n _valj_copyarray = False\n _valj_tmp = ctypes.cast(valj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valj_ is not None:\n _valj_copyarray = True\n _valj_np_tmp = numpy.zeros(len(valj_),numpy.dtype(numpy.float64))\n _valj_np_tmp[:] = valj_\n assert _valj_np_tmp.flags.contiguous\n _valj_tmp = ctypes.cast(_valj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valj_copyarray = False\n _valj_tmp = None\n \n res = __library__.MSK_XX_putacol(self.__nativep,j_,nzj_,_subj_tmp,_valj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def col(self):\n\t\treturn self.__col", "def GetColPartArray(self):\n return _hypre.HypreParMatrix_GetColPartArray(self)", "def get_cell(self, i, j):\n return self._env[i][j]", "def collocation(lhs, rhs, basis, nterms, domain=(0, 1), x_col=None):\n x0, x1 = domain\n if x_col is None:\n dx = S(x1 - x0)/(nterms - 2)\n x_col = [dx + dx*cont for cont in range(nterms)]\n x = symbols(\"x\")\n A_mat = zeros(nterms, nterms)\n b_vec = zeros(nterms, 1)\n for row in range(nterms):\n b_vec[row] = rhs(x_col[row])\n for col in range(nterms):\n phi_j = basis(x, col)\n A_mat[row, col] = lhs(phi_j, x).subs(x, x_col[row])\n return A_mat, b_vec", "def _colvec(x):\n x = np.atleast_1d(x)\n return x[:, None]", "def jmat(ind: int):\n return _jm[ind - 1]", "def _get_column(self, index):\n left, right = self._get_columns()\n return left if index < left.count else right", "def 
__getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def column(self, index):\n return [row[index - 1] for row in self.matrix_list]", "def column_fast(self, key):\n return self._matrix[:, self.dataframe.columns.get_loc(key)]", "def getsparsesymmat(self,idx_,subi,subj,valij): # 3\n maxlen_ = self.getsymmatinfo((idx_))[1]\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxlen_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxlen_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_valij = False\n if valij is None:\n valij_ = None\n else:\n try:\n valij_ = memoryview(valij)\n except TypeError:\n try:\n _tmparr_valij = array.array(\"d\",valij)\n except TypeError:\n raise TypeError(\"Argument valij has wrong type\")\n else:\n valij_ = memoryview(_tmparr_valij)\n _copyback_valij = True\n else:\n if valij_.format != \"d\":\n valij_ = memoryview(array.array(\"d\",valij))\n _copyback_valij = True\n if valij_ is not None and len(valij_) != (maxlen_):\n raise ValueError(\"Array argument valij has wrong length\")\n res = self.__obj.getsparsesymmat(idx_,maxlen_,subi_,subj_,valij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_valij:\n valij[:] = _tmparr_valij\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def get_real_col(self, index):\n\n return self.col2virt.get(index, index)", "def getCol(self, column_name):\n idx = self.colnames.index(column_name)\n return self.getColByIdx(idx)" ]
[ "0.7543346", "0.6976331", "0.6404439", "0.6330695", "0.6330695", "0.6330695", "0.6279653", "0.6225708", "0.6189424", "0.61752653", "0.61163116", "0.5897423", "0.5880596", "0.5877866", "0.5869485", "0.58650994", "0.58379793", "0.57342744", "0.56699353", "0.564279", "0.5636988", "0.5570075", "0.5565305", "0.55593073", "0.5546852", "0.5543069", "0.5525753", "0.549953", "0.5475881", "0.5465472" ]
0.74751633
1
Obtains a sequence of columns from the coefficient matrix. getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_)
def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_): maxnumnz_ = self.getacolslicenumnz((first_),(last_)) _ptrb_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)): raise ValueError("Array argument ptrb is not long enough: Is %d, expected %d" % (len(ptrb_),((last_) - (first_)))) if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable: raise ValueError("Argument ptrb must be writable") if ptrb_ is None: raise ValueError("Argument ptrb may not be None") if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous: _ptrb_copyarray = False _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif ptrb_ is not None: _ptrb_copyarray = True _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64)) _ptrb_np_tmp[:] = ptrb_ assert _ptrb_np_tmp.flags.contiguous _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _ptrb_copyarray = False _ptrb_tmp = None _ptre_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)): raise ValueError("Array argument ptre is not long enough: Is %d, expected %d" % (len(ptre_),((last_) - (first_)))) if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable: raise ValueError("Argument ptre must be writable") if ptre_ is None: raise ValueError("Argument ptre may not be None") if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous: _ptre_copyarray = False _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif ptre_ is not None: _ptre_copyarray = True _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64)) _ptre_np_tmp[:] = ptre_ assert _ptre_np_tmp.flags.contiguous _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _ptre_copyarray = False _ptre_tmp = None _sub_minlength = (maxnumnz_) if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_): raise ValueError("Array argument sub is not long enough: Is %d, expected %d" % (len(sub_),(maxnumnz_))) if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable: raise ValueError("Argument sub must be writable") if sub_ is None: raise ValueError("Argument sub may not be None") if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous: _sub_copyarray = False _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif sub_ is not None: _sub_copyarray = True _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32)) _sub_np_tmp[:] = sub_ assert _sub_np_tmp.flags.contiguous _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _sub_copyarray = False _sub_tmp = None _val_minlength = (maxnumnz_) if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_): raise ValueError("Array argument val is not long enough: Is %d, expected %d" % (len(val_),(maxnumnz_))) if isinstance(val_,numpy.ndarray) and not val_.flags.writeable: raise ValueError("Argument val must be writable") if val_ is None: raise ValueError("Argument val may not be None") if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous: _val_copyarray = False _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif val_ is not None: _val_copyarray = True 
_val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64)) _val_np_tmp[:] = val_ assert _val_np_tmp.flags.contiguous _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _val_copyarray = False _val_tmp = None surp_ = ctypes.c_int64(_sub_minlength) res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if _ptrb_copyarray: ptrb_[:] = _ptrb_np_tmp if _ptre_copyarray: ptre_[:] = _ptre_np_tmp if _sub_copyarray: sub_[:] = _sub_np_tmp if _val_copyarray: val_[:] = _val_np_tmp
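Similarly, a sketch of calling the getacolslice wrapper above; task, first and last are illustrative assumptions. The ptrb/ptre arrays index into sub/val per column, and the buffers are sized via getacolslicenumnz just as the wrapper itself does:

import numpy
first, last = 0, 5                                   # illustrative half-open column range
maxnumnz = task.getacolslicenumnz(first, last)
ptrb = numpy.zeros(last - first, dtype=numpy.int64)  # start offset of each column in sub/val
ptre = numpy.zeros(last - first, dtype=numpy.int64)  # end offset of each column in sub/val
sub = numpy.zeros(maxnumnz, dtype=numpy.int32)       # row indices of the nonzeros
val = numpy.zeros(maxnumnz, dtype=numpy.float64)     # coefficient values
task.getacolslice(first, last, ptrb, ptre, sub, val)
# nonzeros of column first+k: sub[ptrb[k]:ptre[k]], val[ptrb[k]:ptre[k]]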
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putacolslice(self,first_,last_,ptrb_,ptre_,asub_,aval_):\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putacolslice64(self.__nativep,first_,last_,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be 
writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = 
ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getarowslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getacolslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = 
_subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def putacolslice(self,first_,last_,ptrb,ptre,asub,aval): # 3\n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptre is None: raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putacolslice64(first_,last_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not 
None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def getaslice(self,accmode_,first_,last_,ptrb,ptre,sub,val): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n maxnumnz_ = self.getaslicenumnz((accmode_),(first_),(last_))\n _copyback_ptrb = False\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n _copyback_ptrb = True\n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n _copyback_ptrb = True\n if ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb has wrong length\")\n _copyback_ptre = False\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n _copyback_ptre = True\n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n _copyback_ptre = True\n if ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre has wrong length\")\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getaslice64(accmode_,first_,last_,maxnumnz_,len(sub),ptrb_,ptre_,sub_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_sub:\n sub[:] = _tmparr_sub\n if _copyback_ptre:\n ptre[:] = _tmparr_ptre\n if _copyback_ptrb:\n ptrb[:] = _tmparr_ptrb", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def ColPart(self, *args):\n return _hypre.HypreParMatrix_ColPart(self, *args)", "def __getslice__(self,i,j):\n return self.x[i:j]", "def slice(A,rowrange,colrange):\n\n\treturn [[get_elem(A,j,i) for j in rowrange] for i in colrange]", "def get_col(b, ci):\r\n return [b[0][ci], b[1][ci], b[2][ci]]", "def getacolslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = 
__library__.MSK_XX_getacolslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def __getslice__(self,i,j):\n nv=_Matr()\n nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def get_cols(self) :\n\n return list(self.cols)[1:]", "def get_col(A,r=0):\n\treturn list(A[r])", "def sub_columns(arr, sub_size):\n return sub_rows(arr.T, sub_size)", "def slice_matrix(m,i,j):\n return np.take(np.take(m,i,0),j,1)", "def GetColPartArray(self):\n return _hypre.HypreParMatrix_GetColPartArray(self)", "def slice2(self, cvars=None,ctuple=None):\n return self.condition2(cvars,ctuple)", "def test05(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(None, None, -3)\n # print \"b[sl]->\", `b[sl]`\n self.assertRaises(NotImplementedError, b.__getitem__, sl)", "def test02b(self):\n a = np.arange(1e2)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(-3)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test02d(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(-3, -1)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test02c(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1, -3)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def relay_array_getitem(c, a, start, stop, strides):\n assert start.is_constant(tuple)\n assert stop.is_constant(tuple)\n assert strides.is_constant(tuple)\n return relay.op.transform.strided_slice(c.ref(a), start.value, stop.value,\n strides.value)", "def slice_pdb(self, start, stop):\n return", "def test03b(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1, 80, 30)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test03(self):\n a = np.arange(1, 101)\n b = bcolz.carray(a)\n c = b[[1.1, 3.3]]\n r = a[[1, 3]]\n assert_array_equal(c, r, \"fancy indexing does not work correctly\")", "def getcslice(self,first_,last_,c): # 3\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((last_) - (first_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.getcslice(first_,last_,c_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_c:\n c[:] = _tmparr_c" ]
[ "0.68298435", "0.68189096", "0.6633351", "0.6410343", "0.635861", "0.62143683", "0.59577066", "0.58845204", "0.5850808", "0.5840428", "0.5830795", "0.5818751", "0.5787353", "0.5729822", "0.57094395", "0.5652637", "0.5626832", "0.5602785", "0.557522", "0.55677515", "0.5536629", "0.55353665", "0.5508671", "0.54177946", "0.53788126", "0.5366509", "0.5364556", "0.53594303", "0.5341699", "0.5337989" ]
0.8195238
0
Obtains one row of the linear constraint matrix. getarow(self,i_,subi_,vali_)
def getarow(self,i_,subi_,vali_): nzi_ = ctypes.c_int32() _subi_minlength = self.getarownumnz((i_)) if self.getarownumnz((i_)) > 0 and subi_ is not None and len(subi_) != self.getarownumnz((i_)): raise ValueError("Array argument subi is not long enough: Is %d, expected %d" % (len(subi_),self.getarownumnz((i_)))) if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable: raise ValueError("Argument subi must be writable") if subi_ is None: raise ValueError("Argument subi may not be None") if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous: _subi_copyarray = False _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subi_ is not None: _subi_copyarray = True _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32)) _subi_np_tmp[:] = subi_ assert _subi_np_tmp.flags.contiguous _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subi_copyarray = False _subi_tmp = None _vali_minlength = self.getarownumnz((i_)) if self.getarownumnz((i_)) > 0 and vali_ is not None and len(vali_) != self.getarownumnz((i_)): raise ValueError("Array argument vali is not long enough: Is %d, expected %d" % (len(vali_),self.getarownumnz((i_)))) if isinstance(vali_,numpy.ndarray) and not vali_.flags.writeable: raise ValueError("Argument vali must be writable") if vali_ is None: raise ValueError("Argument vali may not be None") if isinstance(vali_, numpy.ndarray) and vali_.dtype is numpy.dtype(numpy.float64) and vali_.flags.contiguous: _vali_copyarray = False _vali_tmp = ctypes.cast(vali_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif vali_ is not None: _vali_copyarray = True _vali_np_tmp = numpy.zeros(len(vali_),numpy.dtype(numpy.float64)) _vali_np_tmp[:] = vali_ assert _vali_np_tmp.flags.contiguous _vali_tmp = ctypes.cast(_vali_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _vali_copyarray = False _vali_tmp = None res = __library__.MSK_XX_getarow(self.__nativep,i_,ctypes.byref(nzi_),_subi_tmp,_vali_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) nzi_ = nzi_.value _nzi_return_value = nzi_ if _subi_copyarray: subi_[:] = _subi_np_tmp if _vali_copyarray: vali_[:] = _vali_np_tmp return (_nzi_return_value)
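By analogy with getacol, a usage sketch for the getarow wrapper above, with task and the row index as illustrative assumptions; the arrays are sized with getarownumnz and filled in place:

import numpy
i = 0                                          # illustrative constraint (row) index
nzi = task.getarownumnz(i)                     # nonzero count of row i
subi = numpy.zeros(nzi, dtype=numpy.int32)     # filled with column indices of the nonzeros
vali = numpy.zeros(nzi, dtype=numpy.float64)   # filled with coefficient values
nzi = task.getarow(i, subi, vali)              # writes into subi/vali, returns the count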
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getarow(self,i_,subi,vali): # 3\n if subi is None: raise TypeError(\"Invalid type for argument subi\")\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != self.getarownumnz((i_)):\n raise ValueError(\"Array argument subi has wrong length\")\n if vali is None: raise TypeError(\"Invalid type for argument vali\")\n _copyback_vali = False\n if vali is None:\n vali_ = None\n else:\n try:\n vali_ = memoryview(vali)\n except TypeError:\n try:\n _tmparr_vali = array.array(\"d\",vali)\n except TypeError:\n raise TypeError(\"Argument vali has wrong type\")\n else:\n vali_ = memoryview(_tmparr_vali)\n _copyback_vali = True\n else:\n if vali_.format != \"d\":\n vali_ = memoryview(array.array(\"d\",vali))\n _copyback_vali = True\n if vali_ is not None and len(vali_) != self.getarownumnz((i_)):\n raise ValueError(\"Array argument vali has wrong length\")\n res,resargs = self.__obj.getarow(i_,subi_,vali_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n if _copyback_vali:\n vali[:] = _tmparr_vali\n if _copyback_subi:\n subi[:] = _tmparr_subi\n return _nzi_return_value", "def getrow(self, i):\n new = lil_matrix((1, self.shape[1]), dtype=self.dtype)\n new.rows[0] = self.rows[i][:]\n new.data[0] = self.data[i][:]\n return new", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i]", "def get_row(A: Matrix, i: int) -> Vector:\n return A[i] # A[i] is already the ith row", "def getrowview(self, i):\n new = lil_matrix((1, self.shape[1]), dtype=self.dtype)\n new.rows[0] = self.rows[i]\n new.data[0] = self.data[i]\n return new", "def est_row_2_base_row(i):\n row = np.zeros(n)\n for j in range(k):\n row[est_inx_2_base_inx[j]] = est_T[i,j]\n return row", "def getitem(self, i, j):\n # XXX: flint matrices do not support negative indices\n # XXX: They also raise ValueError instead of IndexError\n m, n = self.shape\n if i < 0:\n i += m\n if j < 0:\n j += n\n try:\n return self.rep[i, j]\n except ValueError:\n raise IndexError(f\"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}\")", "def getRow(self, i):\n return self.data[:,i]", "def row (self, i):\n return Vector(self._m[i])", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def putarow(self,i_,subi_,vali_):\n nzi_ = None\n if nzi_ is None:\n nzi_ = len(subi_)\n elif nzi_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if nzi_ is None:\n nzi_ = len(vali_)\n elif nzi_ != len(vali_):\n raise IndexError(\"Inconsistent length of array vali\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = 
numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if vali_ is None:\n raise ValueError(\"Argument vali cannot be None\")\n if vali_ is None:\n raise ValueError(\"Argument vali may not be None\")\n if isinstance(vali_, numpy.ndarray) and vali_.dtype is numpy.dtype(numpy.float64) and vali_.flags.contiguous:\n _vali_copyarray = False\n _vali_tmp = ctypes.cast(vali_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif vali_ is not None:\n _vali_copyarray = True\n _vali_np_tmp = numpy.zeros(len(vali_),numpy.dtype(numpy.float64))\n _vali_np_tmp[:] = vali_\n assert _vali_np_tmp.flags.contiguous\n _vali_tmp = ctypes.cast(_vali_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _vali_copyarray = False\n _vali_tmp = None\n \n res = __library__.MSK_XX_putarow(self.__nativep,i_,nzi_,_subi_tmp,_vali_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def row(self, i):\n return Vector(self.data[i], False)", "def putarow(self,i_,subi,vali): # 3\n nzi_ = None\n if nzi_ is None:\n nzi_ = len(subi)\n elif nzi_ != len(subi):\n raise IndexError(\"Inconsistent length of array subi\")\n if nzi_ is None:\n nzi_ = len(vali)\n elif nzi_ != len(vali):\n raise IndexError(\"Inconsistent length of array vali\")\n if nzi_ is None: nzi_ = 0\n if subi is None: raise TypeError(\"Invalid type for argument subi\")\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n \n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n \n if vali is None: raise TypeError(\"Invalid type for argument vali\")\n if vali is None:\n vali_ = None\n else:\n try:\n vali_ = memoryview(vali)\n except TypeError:\n try:\n _tmparr_vali = array.array(\"d\",vali)\n except TypeError:\n raise TypeError(\"Argument vali has wrong type\")\n else:\n vali_ = memoryview(_tmparr_vali)\n \n else:\n if vali_.format != \"d\":\n vali_ = memoryview(array.array(\"d\",vali))\n \n res = self.__obj.putarow(i_,nzi_,subi_,vali_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def adj(self):\n\t\tres = SquareMatrix(self._rows)\n\t\tfor i in range(self._rows):\n\t\t\tfor j in range(self._rows):\n\t\t\t\tres[i][j] = ((-1) ** (i + j)) * self.minor(j, i)\n\t\treturn res", "def problem_reduction_single(self, i, val):\n y_update = - val * self.A.getcol(i).toarray().flatten()\n self.y += y_update\n self.A = sparse.hstack([self.A[:, :i], self.A[:, i + 1:]], format='csr')\n z_index = self.mask.searchsorted(i)\n self.mask = np.insert(self.mask, z_index, i)\n self.z = np.insert(self.z, z_index, val)", "def __getslice__(self,i,j):\n nv=_Matr()\n nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def getsparsesymmat(self,idx_,subi_,subj_,valij_):\n maxlen_ = self.getsymmatinfo((idx_))[1]\n _subi_minlength = (maxlen_)\n if (maxlen_) > 0 and subi_ is not None and len(subi_) != (maxlen_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxlen_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must 
be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxlen_)\n if (maxlen_) > 0 and subj_ is not None and len(subj_) != (maxlen_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxlen_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _valij_minlength = (maxlen_)\n if (maxlen_) > 0 and valij_ is not None and len(valij_) != (maxlen_):\n raise ValueError(\"Array argument valij is not long enough: Is %d, expected %d\" % (len(valij_),(maxlen_)))\n if isinstance(valij_,numpy.ndarray) and not valij_.flags.writeable:\n raise ValueError(\"Argument valij must be writable\")\n if isinstance(valij_, numpy.ndarray) and valij_.dtype is numpy.dtype(numpy.float64) and valij_.flags.contiguous:\n _valij_copyarray = False\n _valij_tmp = ctypes.cast(valij_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valij_ is not None:\n _valij_copyarray = True\n _valij_np_tmp = numpy.zeros(len(valij_),numpy.dtype(numpy.float64))\n _valij_np_tmp[:] = valij_\n assert _valij_np_tmp.flags.contiguous\n _valij_tmp = ctypes.cast(_valij_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valij_copyarray = False\n _valij_tmp = None\n \n res = __library__.MSK_XX_getsparsesymmat(self.__nativep,idx_,maxlen_,_subi_tmp,_subj_tmp,_valij_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _valij_copyarray:\n valij_[:] = _valij_np_tmp", "def row(self, index):\n return self.matrix_list[index - 1]", "def get_weight_row(self, i):\n return self.weights[i]", "def getsparsesymmat(self,idx_,subi,subj,valij): # 3\n maxlen_ = self.getsymmatinfo((idx_))[1]\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxlen_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except 
TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxlen_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_valij = False\n if valij is None:\n valij_ = None\n else:\n try:\n valij_ = memoryview(valij)\n except TypeError:\n try:\n _tmparr_valij = array.array(\"d\",valij)\n except TypeError:\n raise TypeError(\"Argument valij has wrong type\")\n else:\n valij_ = memoryview(_tmparr_valij)\n _copyback_valij = True\n else:\n if valij_.format != \"d\":\n valij_ = memoryview(array.array(\"d\",valij))\n _copyback_valij = True\n if valij_ is not None and len(valij_) != (maxlen_):\n raise ValueError(\"Array argument valij has wrong length\")\n res = self.__obj.getsparsesymmat(idx_,maxlen_,subi_,subj_,valij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_valij:\n valij[:] = _tmparr_valij\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def getarownumnz(self,i_): # 3\n res,resargs = self.__obj.getarownumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n return _nzi_return_value", "def get_stain_matrix(I):", "def appendsparsesymmat(self,dim_,subi_,subj_,valij_):\n nz_ = None\n if nz_ is None:\n nz_ = len(subi_)\n elif nz_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if nz_ is None:\n nz_ = len(subj_)\n elif nz_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if nz_ is None:\n nz_ = len(valij_)\n elif nz_ != len(valij_):\n raise IndexError(\"Inconsistent length of array valij\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if valij_ is None:\n raise ValueError(\"Argument valij cannot be None\")\n if valij_ is None:\n raise ValueError(\"Argument valij may not be None\")\n if isinstance(valij_, numpy.ndarray) and valij_.dtype is 
numpy.dtype(numpy.float64) and valij_.flags.contiguous:\n _valij_copyarray = False\n _valij_tmp = ctypes.cast(valij_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valij_ is not None:\n _valij_copyarray = True\n _valij_np_tmp = numpy.zeros(len(valij_),numpy.dtype(numpy.float64))\n _valij_np_tmp[:] = valij_\n assert _valij_np_tmp.flags.contiguous\n _valij_tmp = ctypes.cast(_valij_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valij_copyarray = False\n _valij_tmp = None\n \n idx_ = ctypes.c_int64()\n res = __library__.MSK_XX_appendsparsesymmat(self.__nativep,dim_,nz_,_subi_tmp,_subj_tmp,_valij_tmp,ctypes.byref(idx_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n idx_ = idx_.value\n _idx_return_value = idx_\n return (_idx_return_value)", "def get_element(self,mat,row,column):\n result = mat[row-1][column-1]\n self.element = result\n return self.element", "def get_elem (A, i, j):\n\treturn A[j][i]", "def rule(model, i, j):\n return 1, model.T0_end[i, j] + (1-model.A[i, j]), None", "def getarowslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n 
_val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getarowslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def right(self, A, i):\n if 2*i + 1 +1 < len(A):\n return 2*i + 1 +1\n return None" ]
[ "0.6920191", "0.6274343", "0.62173253", "0.62173253", "0.62173253", "0.6196485", "0.5856732", "0.5815419", "0.5781744", "0.5771555", "0.5736372", "0.56483686", "0.5642731", "0.5605737", "0.55912125", "0.5473553", "0.5381739", "0.53433347", "0.5286959", "0.52848995", "0.52556133", "0.5220344", "0.51907104", "0.5190614", "0.5186549", "0.5185217", "0.5182906", "0.5165823", "0.51297396", "0.51153654" ]
0.6652325
1
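Illustrative only, not one of the dataset rows: a minimal sketch of how the symmetric-matrix wrappers that appear in the negatives above (appendsparsesymmat / getsparsesymmat) are typically driven from user code. It assumes an already constructed MOSEK Task object named task; the triplet data and buffer names are made up for the example.

import numpy

# Lower-triangular triplets of a 3x3 symmetric matrix (illustrative data only).
dim   = 3
subi  = numpy.array([0, 1, 2, 2], dtype=numpy.int32)   # row indices, subi >= subj
subj  = numpy.array([0, 0, 1, 2], dtype=numpy.int32)   # column indices
valij = numpy.array([1.0, 0.5, 0.25, 2.0])             # coefficients

# Store the matrix inside the task; the call returns an integer handle that
# later identifies the matrix (e.g. in barA/barC terms). The read-back path,
# getsparsesymmat, fills caller-allocated subi/subj/valij buffers, as the
# wrapper code above shows.
idx = task.appendsparsesymmat(dim, subi, subj, valij)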
Obtains the number of nonzeros in a slice of columns of the coefficient matrix. getacolslicenumnz(self,first_,last_)
def getacolslicenumnz(self,first_,last_):
    numnz_ = ctypes.c_int64()
    res = __library__.MSK_XX_getacolslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    numnz_ = numnz_.value
    _numnz_return_value = numnz_
    return (_numnz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_): # 3\n res,resargs = self.__obj.getapiecenumnz(firsti_,lasti_,firstj_,lastj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def getaslicenumnz(self,accmode_,first_,last_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res,resargs = self.__obj.getaslicenumnz64(accmode_,first_,last_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def getacolnumnz(self,i_):\n nzj_ = ctypes.c_int32()\n res = __library__.MSK_XX_getacolnumnz(self.__nativep,i_,ctypes.byref(nzj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzj_ = nzj_.value\n _nzj_return_value = nzj_\n return (_nzj_return_value)", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_):\n numnz_ = ctypes.c_int32()\n res = __library__.MSK_XX_getapiecenumnz(self.__nativep,firsti_,lasti_,firstj_,lastj_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getarownumnz(self,i_): # 3\n res,resargs = self.__obj.getarownumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n return _nzi_return_value", "def getNumCols(self):\n return self.__cols", "def GetNumCols(self):\n return _hypre.HypreParMatrix_GetNumCols(self)", "def colnum(self):\n \n colnum = 0\n for table in self.columnlabels:\n table = np.asarray(table)\n if np.ndim(table) <= 1:\n table = np.reshape(table, (1, -1))\n colnum += table.shape[1]\n return colnum", "def columnCount(self):\n return abs(self.minCol) + abs(self.maxCol)", "def getNoOfCols(self):\n return _patchExtractor.patchExtractor_getNoOfCols(self)", "def getarownumnz(self,i_):\n nzi_ = ctypes.c_int32()\n res = __library__.MSK_XX_getarownumnz(self.__nativep,i_,ctypes.byref(nzi_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzi_ = nzi_.value\n _nzi_return_value = nzi_\n return (_nzi_return_value)", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def GetNumberCols(self):\n return len(self.__colsKey)", "def num_cols(self):\n return (len(self.rows[0]))", "def getNbColumns(self):\n return self.data.shape[0]", "def ncolumns(self):\n return self.__ncols", "def get_num_cols(self):\n return self._num_cols", "def GetNumColumns(self):\n return len(self.columns)", "def get_cols_dummy():", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def num_cols(self):\n return len(self.rows[0])", "def GetColStarts(self):\n return _hypre.HypreParMatrix_GetColStarts(self)", "def 
get_cols(self) :\n\n return list(self.cols)[1:]", "def ncolumns(self):\n return len(self.__column_list)", "def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may 
not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def __column_height(self, x):\n\t\tcolumn = self.board[:, x]\n\t\treturn np.count_nonzero(column)", "def N_Col(self,val):\n\t\tif val in self.colums:\n\t\t\treturn self.colums.index(val)\n\t\telse:\n\t\t\treturn False" ]
[ "0.7030425", "0.6908309", "0.69077706", "0.68798804", "0.67562634", "0.6697987", "0.60632735", "0.60440516", "0.59911394", "0.5967844", "0.59044164", "0.5890069", "0.58208543", "0.57897", "0.5725127", "0.5724517", "0.57188284", "0.5676727", "0.5675189", "0.5605064", "0.5597147", "0.5590894", "0.5590643", "0.5586166", "0.5583682", "0.5570443", "0.55701524", "0.5567639", "0.5559105", "0.5553589" ]
0.7720494
0
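A hedged usage sketch for the record above, not part of the dataset: counting the coefficient-matrix nonzeros in a column slice with getacolslicenumnz. The surrounding Task construction uses standard MOSEK optimizer-API calls (Env, appendvars, appendcons, putaij, getnumvar, getnumanz) purely for illustration.

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendvars(3)
    task.appendcons(2)
    task.putaij(0, 0, 1.0)   # a few coefficients of A
    task.putaij(0, 2, 2.0)
    task.putaij(1, 1, 3.0)

    # Nonzeros in the column slice [0, numvar) -- the full matrix here, so the
    # slice count matches the task's total nonzero count.
    nnz_slice = task.getacolslicenumnz(0, task.getnumvar())
    assert nnz_slice == task.getnumanz() == 3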
Obtains the number of nonzeros in a slice of rows of the coefficient matrix. getarowslicenumnz(self,first_,last_)
def getarowslicenumnz(self,first_,last_):
    numnz_ = ctypes.c_int64()
    res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    numnz_ = numnz_.value
    _numnz_return_value = numnz_
    return (_numnz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getacolslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getacolslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_): # 3\n res,resargs = self.__obj.getapiecenumnz(firsti_,lasti_,firstj_,lastj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def getapiecenumnz(self,firsti_,lasti_,firstj_,lastj_):\n numnz_ = ctypes.c_int32()\n res = __library__.MSK_XX_getapiecenumnz(self.__nativep,firsti_,lasti_,firstj_,lastj_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getaslicenumnz(self,accmode_,first_,last_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res,resargs = self.__obj.getaslicenumnz64(accmode_,first_,last_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n return _numnz_return_value", "def getacolnumnz(self,i_):\n nzj_ = ctypes.c_int32()\n res = __library__.MSK_XX_getacolnumnz(self.__nativep,i_,ctypes.byref(nzj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzj_ = nzj_.value\n _nzj_return_value = nzj_\n return (_nzj_return_value)", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def getarownumnz(self,i_):\n nzi_ = ctypes.c_int32()\n res = __library__.MSK_XX_getarownumnz(self.__nativep,i_,ctypes.byref(nzi_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzi_ = nzi_.value\n _nzi_return_value = nzi_\n return (_nzi_return_value)", "def nnz(self):\n return self.rep.nnz()", "def nnz(self):\n return self.to_ddm().nnz()", "def getarownumnz(self,i_): # 3\n res,resargs = self.__obj.getarownumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzi_return_value = resargs\n return _nzi_return_value", "def nnz(self):\n return len(self.data)", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def numRowsCols(array):\n return len(array),len(array[0])", "def nnz(self):", "def nnz(self):\n return len(self.value)", "def nnz(self):\n t = self.get_MSC()\n return len(np.unique(t['masks']))", "def count_lead_zs(self,x):\n display_mask = 1 << 31\n cnt = 0\n for c in xrange(1,33):\n if((x & display_mask) == 0):\n cnt += 1\n else:\n return cnt\n x <<= 1\n return cnt", "def matrix_dim(CT):\r\n if CT[0]==0 and CT[-1]==0:\r\n return 2\r\n elif CT[0]!=0 and CT[-1]!=0:\r\n return 4", "def nz(self):\n return self._dim[2]", "def getacolslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and 
not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = 
ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n 
if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val 
must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getarowslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def nnz(self):\n\t\treturn self.st.size()", "def GetNumCols(self):\n return _hypre.HypreParMatrix_GetNumCols(self)", "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def N_z(self) -> int:\n return self.params.N_z", "def getarowslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected 
%d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getarowslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def NNZ(self):\n return _hypre.HypreParMatrix_NNZ(self)", "def nrows(self):\n if self.ncolumns() == 0:\n return 0\n nrows = self.table_column(0).nrows()\n for i in range(1, self.ncolumns()):\n nrows = min(self.table_column(i).nrows(), nrows)\n return nrows" ]
[ "0.6821424", "0.6395617", "0.63196206", "0.6160555", "0.6055939", "0.5976274", "0.5861332", "0.57293874", "0.57270485", "0.5675843", "0.5640828", "0.5561198", "0.5522415", "0.5509788", "0.5488033", "0.5468512", "0.5459757", "0.54302907", "0.5399739", "0.5388458", "0.5382514", "0.5348088", "0.530128", "0.5296612", "0.51692855", "0.50978416", "0.50846", "0.5081954", "0.5080517", "0.5080003" ]
0.7653431
0
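Again a hedged sketch rather than dataset content, reusing a populated Task named task as in the sketch above: getarowslicenumnz is normally used to size the buffers that the next record's getarowslice fills with CSR-style row data (int64 pointer arrays, int32 column indices, float64 values, matching the type checks in the wrapper).

import numpy

first, last = 0, task.getnumcon()               # constraint (row) range [first, last)
nnz = task.getarowslicenumnz(first, last)       # from the record above

ptrb = numpy.zeros(last - first, dtype=numpy.int64)   # start of each row in sub/val
ptre = numpy.zeros(last - first, dtype=numpy.int64)   # end of each row in sub/val
sub  = numpy.zeros(nnz, dtype=numpy.int32)            # column indices
val  = numpy.zeros(nnz, dtype=numpy.float64)          # coefficients

task.getarowslice(first, last, ptrb, ptre, sub, val)
# Row first+i of A is given by sub[ptrb[i]:ptre[i]] and val[ptrb[i]:ptre[i]].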
Obtains a sequence of rows from the coefficient matrix. getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_)
def getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_):
    maxnumnz_ = self.getarowslicenumnz((first_),(last_))
    _ptrb_minlength = ((last_) - (first_))
    if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):
        raise ValueError("Array argument ptrb is not long enough: Is %d, expected %d" % (len(ptrb_),((last_) - (first_))))
    if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:
        raise ValueError("Argument ptrb must be writable")
    if ptrb_ is None:
        raise ValueError("Argument ptrb may not be None")
    if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:
        _ptrb_copyarray = False
        _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    elif ptrb_ is not None:
        _ptrb_copyarray = True
        _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))
        _ptrb_np_tmp[:] = ptrb_
        assert _ptrb_np_tmp.flags.contiguous
        _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    else:
        _ptrb_copyarray = False
        _ptrb_tmp = None
    _ptre_minlength = ((last_) - (first_))
    if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):
        raise ValueError("Array argument ptre is not long enough: Is %d, expected %d" % (len(ptre_),((last_) - (first_))))
    if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:
        raise ValueError("Argument ptre must be writable")
    if ptre_ is None:
        raise ValueError("Argument ptre may not be None")
    if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:
        _ptre_copyarray = False
        _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    elif ptre_ is not None:
        _ptre_copyarray = True
        _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))
        _ptre_np_tmp[:] = ptre_
        assert _ptre_np_tmp.flags.contiguous
        _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))
    else:
        _ptre_copyarray = False
        _ptre_tmp = None
    _sub_minlength = (maxnumnz_)
    if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):
        raise ValueError("Array argument sub is not long enough: Is %d, expected %d" % (len(sub_),(maxnumnz_)))
    if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:
        raise ValueError("Argument sub must be writable")
    if sub_ is None:
        raise ValueError("Argument sub may not be None")
    if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:
        _sub_copyarray = False
        _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    elif sub_ is not None:
        _sub_copyarray = True
        _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))
        _sub_np_tmp[:] = sub_
        assert _sub_np_tmp.flags.contiguous
        _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
    else:
        _sub_copyarray = False
        _sub_tmp = None
    _val_minlength = (maxnumnz_)
    if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):
        raise ValueError("Array argument val is not long enough: Is %d, expected %d" % (len(val_),(maxnumnz_)))
    if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:
        raise ValueError("Argument val must be writable")
    if val_ is None:
        raise ValueError("Argument val may not be None")
    if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:
        _val_copyarray = False
        _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    elif val_ is not None:
        _val_copyarray = True
        _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))
        _val_np_tmp[:] = val_
        assert _val_np_tmp.flags.contiguous
        _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
    else:
        _val_copyarray = False
        _val_tmp = None
    surp_ = ctypes.c_int64(_sub_minlength)
    res = __library__.MSK_XX_getarowslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    if _ptrb_copyarray:
        ptrb_[:] = _ptrb_np_tmp
    if _ptre_copyarray:
        ptre_[:] = _ptre_np_tmp
    if _sub_copyarray:
        sub_[:] = _sub_np_tmp
    if _val_copyarray:
        val_[:] = _val_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and 
val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getaslice(self,accmode_,first_,last_,ptrb,ptre,sub,val): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n maxnumnz_ = self.getaslicenumnz((accmode_),(first_),(last_))\n _copyback_ptrb = False\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n _copyback_ptrb = True\n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n _copyback_ptrb = True\n if ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb has wrong length\")\n _copyback_ptre = False\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n _copyback_ptre = True\n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n _copyback_ptre = True\n if ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre has wrong length\")\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getaslice64(accmode_,first_,last_,maxnumnz_,len(sub),ptrb_,ptre_,sub_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_sub:\n sub[:] = _tmparr_sub\n if _copyback_ptre:\n ptre[:] = _tmparr_ptre\n if 
_copyback_ptrb:\n ptrb[:] = _tmparr_ptrb", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def getarowslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.con,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getarowslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def putacolslice(self,first_,last_,ptrb_,ptre_,asub_,aval_):\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n 
_ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putacolslice64(self.__nativep,first_,last_,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getacolslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if 
(maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getarowslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and 
val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getarowslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def putarowslice(self,first_,last_,ptrb_,ptre_,asub_,aval_):\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise 
ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putarowslice64(self.__nativep,first_,last_,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def __getslice__(self,i,j):\n return self.x[i:j]", "def putarowslice(self,first_,last_,ptrb,ptre,asub,aval): # 3\n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb has wrong length\")\n if ptre is None: 
raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre has wrong length\")\n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putarowslice64(first_,last_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def putacolslice(self,first_,last_,ptrb,ptre,asub,aval): # 3\n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptre is None: raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = 
memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putacolslice64(first_,last_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def row_slice(self, xt, nproc):\n if nproc is None: nproc = self.nproc\n cs = xt.shape[0]//nproc #chuncksize\n tmp = [xt[i*cs:cs*i+cs,:] for i in range(nproc)]\n if nproc*cs != xt.shape[0]:\n tmp[-1] = np.concatenate((tmp[-1],xt[nproc*cs:xt.shape[0],:]),axis=0)\n return tmp", "def slice(A,rowrange,colrange):\n\n\treturn [[get_elem(A,j,i) for j in rowrange] for i in colrange]", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def slice_matrix(m,i,j):\n return np.take(np.take(m,i,0),j,1)", "def __getslice__(self,i,j):\n nv=_Matr()\n nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def relay_array_getitem(c, a, start, stop, strides):\n assert start.is_constant(tuple)\n assert stop.is_constant(tuple)\n assert strides.is_constant(tuple)\n return relay.op.transform.strided_slice(c.ref(a), start.value, stop.value,\n strides.value)", "def getcslice(self,first_,last_,c_):\n _c_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and c_ is not None and len(c_) != ((last_) - (first_)):\n raise ValueError(\"Array argument c is not long enough: Is %d, expected %d\" % (len(c_),((last_) - (first_))))\n if isinstance(c_,numpy.ndarray) and not c_.flags.writeable:\n raise ValueError(\"Argument c must be writable\")\n if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:\n _c_copyarray = False\n _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif c_ is not None:\n _c_copyarray = True\n _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))\n _c_np_tmp[:] = c_\n assert _c_np_tmp.flags.contiguous\n _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _c_copyarray = False\n _c_tmp = None\n \n res = __library__.MSK_XX_getcslice(self.__nativep,first_,last_,_c_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _c_copyarray:\n c_[:] = _c_np_tmp", "def getcslice(self,first_,last_,c): # 3\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((last_) - (first_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.getcslice(first_,last_,c_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_c:\n c[:] = _tmparr_c", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def getxxslice(self,whichsol_,first_,last_,xx_):\n _xx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and xx_ is not None and len(xx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),((last_) - (first_))))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise 
ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n res = __library__.MSK_XX_getxxslice(self.__nativep,whichsol_,first_,last_,_xx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp", "def slice2(self, vs=None,xs=None):\n return self.condition2(vs,xs)", "def as_slice(self):\n # slice for accessing arrays of values\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)", "def getxxslice(self,whichsol_,first_,last_,xx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xx has wrong length\")\n res = self.__obj.getxxslice(whichsol_,first_,last_,xx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_xx:\n xx[:] = _tmparr_xx", "def slice2(self, cvars=None,ctuple=None):\n return self.condition2(cvars,ctuple)", "def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___getslice__(self, *args)" ]
[ "0.72424006", "0.7017984", "0.6397862", "0.6392683", "0.6295936", "0.62946856", "0.62616587", "0.6202416", "0.6158898", "0.60779953", "0.5999932", "0.59529024", "0.5874609", "0.5798113", "0.57943004", "0.5755523", "0.56610376", "0.5640397", "0.5582677", "0.55730736", "0.5569688", "0.5535197", "0.5525514", "0.551946", "0.5454037", "0.5440375", "0.5414295", "0.53724456", "0.5327867", "0.5305298" ]
0.7663158
0
Obtains a sequence of rows from the coefficient matrix in sparse triplet format. getarowslicetrip(self,first_,last_,subi_,subj_,val_)
def getarowslicetrip(self,first_,last_,subi_,subj_,val_):
  maxnumnz_ = self.getarowslicenumnz((first_),(last_))
  _subi_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):
    raise ValueError("Array argument subi is not long enough: Is %d, expected %d" % (len(subi_),(maxnumnz_)))
  if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:
    raise ValueError("Argument subi must be writable")
  if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:
    _subi_copyarray = False
    _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  elif subi_ is not None:
    _subi_copyarray = True
    _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))
    _subi_np_tmp[:] = subi_
    assert _subi_np_tmp.flags.contiguous
    _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  else:
    _subi_copyarray = False
    _subi_tmp = None
  _subj_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):
    raise ValueError("Array argument subj is not long enough: Is %d, expected %d" % (len(subj_),(maxnumnz_)))
  if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:
    raise ValueError("Argument subj must be writable")
  if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:
    _subj_copyarray = False
    _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  elif subj_ is not None:
    _subj_copyarray = True
    _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))
    _subj_np_tmp[:] = subj_
    assert _subj_np_tmp.flags.contiguous
    _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  else:
    _subj_copyarray = False
    _subj_tmp = None
  _val_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):
    raise ValueError("Array argument val is not long enough: Is %d, expected %d" % (len(val_),(maxnumnz_)))
  if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:
    raise ValueError("Argument val must be writable")
  if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:
    _val_copyarray = False
    _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  elif val_ is not None:
    _val_copyarray = True
    _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))
    _val_np_tmp[:] = val_
    assert _val_np_tmp.flags.contiguous
    _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  else:
    _val_copyarray = False
    _val_tmp = None
  surp_ = ctypes.c_int64(_subi_minlength)
  res = __library__.MSK_XX_getarowslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)
  if res != 0:
    _,msg = self.__getlasterror(res)
    raise Error(rescode(res),msg)
  if _subi_copyarray:
    subi_[:] = _subi_np_tmp
  if _subj_copyarray:
    subj_[:] = _subj_np_tmp
  if _val_copyarray:
    val_[:] = _val_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getarowslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.con,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getarowslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def getacolslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = 
ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if 
_copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument 
val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getarowslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getaslice(self,accmode_,first_,last_,ptrb,ptre,sub,val): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n maxnumnz_ = self.getaslicenumnz((accmode_),(first_),(last_))\n _copyback_ptrb = False\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n _copyback_ptrb = True\n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n _copyback_ptrb = True\n if ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb has wrong length\")\n _copyback_ptre = False\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n _copyback_ptre = True\n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n _copyback_ptre = True\n if ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre has wrong length\")\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getaslice64(accmode_,first_,last_,maxnumnz_,len(sub),ptrb_,ptre_,sub_,val_)\n if res != 0:\n result,msg = 
self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_sub:\n sub[:] = _tmparr_sub\n if _copyback_ptre:\n ptre[:] = _tmparr_ptre\n if _copyback_ptrb:\n ptrb[:] = _tmparr_ptrb", "def __getslice__(self,i,j):\n return self.x[i:j]", "def slice_matrix(m,i,j):\n return np.take(np.take(m,i,0),j,1)", "def getskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getsucslice(self,whichsol_,first_,last_,suc_):\n _suc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and suc_ is not None and len(suc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),((last_) - (first_))))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n res = __library__.MSK_XX_getsucslice(self.__nativep,whichsol_,first_,last_,_suc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def gather_rows_1(tt_mat, inds):\n cores = tt_mat.tt_cores\n slices = []\n batch_size = int(inds[0].shape[0])\n\n\n ranks = [int(tt_core.shape[0]) for tt_core in tt_mat.tt_cores] + [1, 
]\n\n\n for k, core in enumerate(cores):\n i = inds[k]\n #core = core.permute(1, 0, 2, 3).to(inds.device)\n\n cur_slice = torch.index_select(core, 1, i)\n\n if k == 0:\n res = cur_slice\n\n else:\n res = res.view(batch_size, -1, ranks[k])\n curr_core = cur_slice.view(ranks[k], batch_size, -1)\n res = torch.einsum('oqb,bow->oqw', (res, curr_core))\n\n return res\n\n #slices.append(torch.index_select(core, 1, i).permute(1, 0, 2, 3))", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = 
ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getsucslice(self,whichsol_,first_,last_,suc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument suc has wrong length\")\n res = self.__obj.getsucslice(whichsol_,first_,last_,suc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_suc:\n suc[:] = _tmparr_suc", "def getRow(self, i):\n return self.data[:,i]", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def getskxslice(self,whichsol_,first_,last_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskxslice(whichsol_,first_,last_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def __getslice__(self,i,j):\n nv=_Matr()\n 
nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def __getitem__(self, index: Union[int, slice]) -> Union[D2TXTRow, List[D2TXTRow]]:\n return self._rows[index]", "def getskxslice(self,whichsol_,first_,last_,skx_):\n _skx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),((last_) - (first_))))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskxslice(self.__nativep,whichsol_,first_,last_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def row_slice(self, xt, nproc):\n if nproc is None: nproc = self.nproc\n cs = xt.shape[0]//nproc #chuncksize\n tmp = [xt[i*cs:cs*i+cs,:] for i in range(nproc)]\n if nproc*cs != xt.shape[0]:\n tmp[-1] = np.concatenate((tmp[-1],xt[nproc*cs:xt.shape[0],:]),axis=0)\n return tmp", "def getxxslice(self,whichsol_,first_,last_,xx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xx has wrong length\")\n res = self.__obj.getxxslice(whichsol_,first_,last_,xx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_xx:\n xx[:] = _tmparr_xx", "def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]", "def get_diagonal_subtensor_view(x, i0, i1):\r\n # We have to cast i0 and i0 to int because python 2.4 (and maybe later)\r\n # do not support indexing with 0-dim, 'int*' ndarrays.\r\n i0 = int(i0)\r\n i1 = int(i1)\r\n if x.shape[i0] < x.shape[i1]:\r\n raise NotImplementedError('is this allowed?')\r\n idx = [slice(None)] * x.ndim\r\n idx[i0] = slice(x.shape[i1] - 1, None, None)\r\n xview = x.__getitem__(tuple(idx))\r\n strides = list(xview.strides)\r\n strides[i1] -= strides[i0]\r\n xview.strides = strides\r\n return xview", "def slice(A,rowrange,colrange):\n\n\treturn [[get_elem(A,j,i) for j in rowrange] for i in colrange]", "def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]" ]
[ "0.7290529", "0.70560914", "0.6958755", "0.61386895", "0.5720467", "0.5619456", "0.56170875", "0.5535816", "0.5501307", "0.5492698", "0.5491768", "0.54850245", "0.5449865", "0.5407904", "0.5361041", "0.5322481", "0.53132904", "0.5291811", "0.5257537", "0.5243826", "0.5230079", "0.5227893", "0.5225116", "0.5212413", "0.5178705", "0.5102134", "0.50922775", "0.5070457", "0.5059146", "0.5037361" ]
0.7172589
1
Obtains a sequence of columns from the coefficient matrix in triplet format. getacolslicetrip(self,first_,last_,subi_,subj_,val_)
def getacolslicetrip(self,first_,last_,subi_,subj_,val_):
  maxnumnz_ = self.getacolslicenumnz((first_),(last_))
  _subi_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):
    raise ValueError("Array argument subi is not long enough: Is %d, expected %d" % (len(subi_),(maxnumnz_)))
  if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:
    raise ValueError("Argument subi must be writable")
  if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:
    _subi_copyarray = False
    _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  elif subi_ is not None:
    _subi_copyarray = True
    _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))
    _subi_np_tmp[:] = subi_
    assert _subi_np_tmp.flags.contiguous
    _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  else:
    _subi_copyarray = False
    _subi_tmp = None
  _subj_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):
    raise ValueError("Array argument subj is not long enough: Is %d, expected %d" % (len(subj_),(maxnumnz_)))
  if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:
    raise ValueError("Argument subj must be writable")
  if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:
    _subj_copyarray = False
    _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  elif subj_ is not None:
    _subj_copyarray = True
    _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))
    _subj_np_tmp[:] = subj_
    assert _subj_np_tmp.flags.contiguous
    _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))
  else:
    _subj_copyarray = False
    _subj_tmp = None
  _val_minlength = (maxnumnz_)
  if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):
    raise ValueError("Array argument val is not long enough: Is %d, expected %d" % (len(val_),(maxnumnz_)))
  if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:
    raise ValueError("Argument val must be writable")
  if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:
    _val_copyarray = False
    _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  elif val_ is not None:
    _val_copyarray = True
    _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))
    _val_np_tmp[:] = val_
    assert _val_np_tmp.flags.contiguous
    _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  else:
    _val_copyarray = False
    _val_tmp = None
  surp_ = ctypes.c_int64(_subi_minlength)
  res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)
  if res != 0:
    _,msg = self.__getlasterror(res)
    raise Error(rescode(res),msg)
  if _subi_copyarray:
    subi_[:] = _subi_np_tmp
  if _subj_copyarray:
    subj_[:] = _subj_np_tmp
  if _val_copyarray:
    val_[:] = _val_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def getacolslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre 
may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getacolslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getarowslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.con,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong 
length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getarowslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def getarowslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise 
ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getarowslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def sub_columns(arr, sub_size):\n return sub_rows(arr.T, sub_size)", "def ColPart(self, *args):\n return _hypre.HypreParMatrix_ColPart(self, *args)", "def getarowslice(self,first_,last_,ptrb_,ptre_,sub_,val_):\n maxnumnz_ = self.getarowslicenumnz((first_),(last_))\n _ptrb_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptrb_ is not None and len(ptrb_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptrb is not long enough: Is %d, expected %d\" % (len(ptrb_),((last_) - (first_))))\n if isinstance(ptrb_,numpy.ndarray) and not ptrb_.flags.writeable:\n raise ValueError(\"Argument ptrb must be writable\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n _ptre_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and ptre_ is not None and len(ptre_) != ((last_) - (first_)):\n raise ValueError(\"Array argument ptre is not long enough: Is %d, expected %d\" % (len(ptre_),((last_) - (first_))))\n if isinstance(ptre_,numpy.ndarray) and not ptre_.flags.writeable:\n raise ValueError(\"Argument ptre must be writable\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n _sub_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and sub_ is not None and len(sub_) != (maxnumnz_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnumnz_)))\n if 
isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_sub_minlength)\n res = __library__.MSK_XX_getarowslice64(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_ptrb_tmp,_ptre_tmp,_sub_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _ptrb_copyarray:\n ptrb_[:] = _ptrb_np_tmp\n if _ptre_copyarray:\n ptre_[:] = _ptre_np_tmp\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def getarowslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getarowslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def __getslice__(self,i,j):\n return self.x[i:j]", "def putacolslice(self,first_,last_,ptrb_,ptre_,asub_,aval_):\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n 
_ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putacolslice64(self.__nativep,first_,last_,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getacolslicenumnz(self,first_,last_):\n numnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getacolslicenumnz64(self.__nativep,first_,last_,ctypes.byref(numnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n return (_numnz_return_value)", "def getacol(self,j_,subj,valj): # 3\n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != self.getacolnumnz((j_)):\n raise ValueError(\"Array argument subj has wrong length\")\n if valj is None: raise TypeError(\"Invalid type for argument valj\")\n _copyback_valj = False\n if valj is None:\n valj_ = None\n else:\n try:\n valj_ = memoryview(valj)\n except TypeError:\n try:\n _tmparr_valj = array.array(\"d\",valj)\n except TypeError:\n raise TypeError(\"Argument valj has wrong type\")\n else:\n valj_ = memoryview(_tmparr_valj)\n _copyback_valj = True\n else:\n if valj_.format != \"d\":\n valj_ = memoryview(array.array(\"d\",valj))\n _copyback_valj = True\n if valj_ is not None and len(valj_) != 
self.getacolnumnz((j_)):\n raise ValueError(\"Array argument valj has wrong length\")\n res,resargs = self.__obj.getacol(j_,subj_,valj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n if _copyback_valj:\n valj[:] = _tmparr_valj\n if _copyback_subj:\n subj[:] = _tmparr_subj\n return _nzj_return_value", "def __getslice__(self,i,j):\n nv=_Matr()\n nv._Matr__c_elem().recup_relC(self._Matr__c_elem(),i,j)\n nv._Matr__maj()\n return nv", "def __getslice__(self,i,j):\n nv=_Matr()\n nv.__c_elem().recup_rel(self.__c_elem(),i,j)\n nv.__maj()\n return nv", "def get_col(b, ci):\r\n return [b[0][ci], b[1][ci], b[2][ci]]", "def slice_matrix(m,i,j):\n return np.take(np.take(m,i,0),j,1)", "def putacolslice(self,first_,last_,ptrb,ptre,asub,aval): # 3\n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptre is None: raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putacolslice64(first_,last_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getacol(self,j_,subj_,valj_):\n nzj_ = ctypes.c_int32()\n _subj_minlength = self.getacolnumnz((j_))\n if self.getacolnumnz((j_)) > 0 and subj_ is not None and len(subj_) != self.getacolnumnz((j_)):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),self.getacolnumnz((j_))))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert 
_subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _valj_minlength = self.getacolnumnz((j_))\n if self.getacolnumnz((j_)) > 0 and valj_ is not None and len(valj_) != self.getacolnumnz((j_)):\n raise ValueError(\"Array argument valj is not long enough: Is %d, expected %d\" % (len(valj_),self.getacolnumnz((j_))))\n if isinstance(valj_,numpy.ndarray) and not valj_.flags.writeable:\n raise ValueError(\"Argument valj must be writable\")\n if valj_ is None:\n raise ValueError(\"Argument valj may not be None\")\n if isinstance(valj_, numpy.ndarray) and valj_.dtype is numpy.dtype(numpy.float64) and valj_.flags.contiguous:\n _valj_copyarray = False\n _valj_tmp = ctypes.cast(valj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valj_ is not None:\n _valj_copyarray = True\n _valj_np_tmp = numpy.zeros(len(valj_),numpy.dtype(numpy.float64))\n _valj_np_tmp[:] = valj_\n assert _valj_np_tmp.flags.contiguous\n _valj_tmp = ctypes.cast(_valj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valj_copyarray = False\n _valj_tmp = None\n \n res = __library__.MSK_XX_getacol(self.__nativep,j_,ctypes.byref(nzj_),_subj_tmp,_valj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzj_ = nzj_.value\n _nzj_return_value = nzj_\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _valj_copyarray:\n valj_[:] = _valj_np_tmp\n return (_nzj_return_value)", "def get_cols(self) :\n\n return list(self.cols)[1:]", "def slice2(self, cvars=None,ctuple=None):\n return self.condition2(cvars,ctuple)", "def slice(A,rowrange,colrange):\n\n\treturn [[get_elem(A,j,i) for j in rowrange] for i in colrange]", "def GetColPartArray(self):\n return _hypre.HypreParMatrix_GetColPartArray(self)", "def col(self, i):\n return Vector([row[i] for row in self.data])", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def getacolnumnz(self,i_): # 3\n res,resargs = self.__obj.getacolnumnz(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nzj_return_value = resargs\n return _nzj_return_value", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def fast_get_col(self,j):\n col = self.col_view[:,j].copy()\n col.data = self.X.data[col.data]\n return col", "def var_slice(colnames, x):\n # TODO: produces bahavior similar to df.loc[:, \"V1\":\"V3\"], but can reverse\n # TODO: make DRY\n # TODO: reverse not including end points\n if isinstance(x.start, Var):\n start_indx = (colnames == x.start.name).idxmax()\n elif isinstance(x.start, str):\n start_indx = (colnames == x.start).idxmax()\n else:\n start_indx = x.start or 0\n\n if isinstance(x.stop, Var):\n stop_indx = (colnames == x.stop.name).idxmax() + 1\n elif isinstance(x.stop, str):\n stop_indx = (colnames == x.stop).idxmax() + 1\n else:\n stop_indx = x.stop or len(colnames)\n\n if start_indx > stop_indx:\n return stop_indx, start_indx\n else:\n return start_indx, stop_indx", "def get_col(A,r=0):\n\treturn list(A[r])" ]
[ "0.77380747", "0.69819945", "0.67847955", "0.6653259", "0.6169519", "0.5882231", "0.58708996", "0.57938963", "0.570199", "0.56744456", "0.56735504", "0.5606898", "0.5576777", "0.5566366", "0.5544074", "0.55073357", "0.5492098", "0.54868627", "0.5441892", "0.5436611", "0.5434624", "0.5387297", "0.53826", "0.53708196", "0.53248143", "0.5302407", "0.5276967", "0.5262948", "0.52569854", "0.52178645" ]
0.7907325
0
Obtains bound information for one constraint. getconbound(self,i_)
def getconbound(self,i_):
  bk_ = ctypes.c_int32()
  bl_ = ctypes.c_double()
  bu_ = ctypes.c_double()
  res = __library__.MSK_XX_getconbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))
  if res != 0:
    _,msg = self.__getlasterror(res)
    raise Error(rescode(res),msg)
  _bk_return_value = boundkey(bk_.value)
  bl_ = bl_.value
  _bl_return_value = bl_
  bu_ = bu_.value
  _bu_return_value = bu_
  return (_bk_return_value,_bl_return_value,_bu_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconbound(self,i_): # 3\n res,resargs = self.__obj.getconbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def getvarbound(self,i_): # 3\n res,resargs = self.__obj.getvarbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def getbound(self,accmode_,i_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res,resargs = self.__obj.getbound(accmode_,i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def getvarbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getvarbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n _bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)", "def constraint(self) -> Constraint:\n return self._constraint", "def _get_one_bound(self, param_name):\n return getattr(self, '__' + param_name + '_bounds')", "def boundary_of_set(i):\n b = self.args[i].boundary\n for j, a in enumerate(self.args):\n if j != i:\n b = b - a.interior\n return b", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self, p_int, p_int_1, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def chgconbound(self,i_,lower_,finite_,value_): # 3\n res = self.__obj.chgconbound(i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getConstraint(self):\n return self.gk, self.g_mink, self.g_maxk", "def constraints(self):\n return self._constraints", "def constraints(self):\n return self._constraints", "def putconbound(self,i_,bk_,bl_,bu_): # 3\n if not isinstance(bk_,boundkey): raise TypeError(\"Argument bk has wrong type\")\n res = self.__obj.putconbound(i_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_constraints(self):\n return self.constraints", "def constraints(self):\n ...", "def getConstraint(self, *args):\n return _libsbml.Model_getConstraint(self, *args)", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def chgconbound(self,i_,lower_,finite_,value_):\n res = __library__.MSK_XX_chgconbound(self.__nativep,i_,lower_,finite_,value_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in 
self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def _getBound(self):\n if self._colormap is not None:\n bound = self._getRawBound()\n\n if bound is None:\n bound = self._getColormapRange()[self._index]\n return bound\n else:\n return 1. # Fallback", "def get_bound_circuit(self, theta):\r\n param_dict = self.get_param_dict(theta)\r\n\r\n return self.circuit.bind_parameters(param_dict)", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages" ]
[ "0.8117996", "0.67920166", "0.6751894", "0.67239743", "0.6467672", "0.62063897", "0.6088784", "0.6081014", "0.6081014", "0.6081014", "0.6081014", "0.6081014", "0.6081014", "0.6081014", "0.6081014", "0.6065061", "0.5971675", "0.59263587", "0.59000784", "0.59000784", "0.5864605", "0.5833809", "0.58046234", "0.58005166", "0.57962644", "0.5790428", "0.57882977", "0.5761463", "0.5748048", "0.57475996" ]
0.7855516
1
Obtains bound information for one variable. getvarbound(self,i_)
def getvarbound(self,i_):
  bk_ = ctypes.c_int32()
  bl_ = ctypes.c_double()
  bu_ = ctypes.c_double()
  res = __library__.MSK_XX_getvarbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))
  if res != 0:
    _,msg = self.__getlasterror(res)
    raise Error(rescode(res),msg)
  _bk_return_value = boundkey(bk_.value)
  bl_ = bl_.value
  _bl_return_value = bl_
  bu_ = bu_.value
  _bu_return_value = bu_
  return (_bk_return_value,_bl_return_value,_bu_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvarbound(self,i_): # 3\n res,resargs = self.__obj.getvarbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def getconbound(self,i_): # 3\n res,resargs = self.__obj.getconbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def getconbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getconbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n _bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)", "def _get_one_bound(self, param_name):\n return getattr(self, '__' + param_name + '_bounds')", "def putvarbound(self,j_,bk_,bl_,bu_): # 3\n if not isinstance(bk_,boundkey): raise TypeError(\"Argument bk has wrong type\")\n res = self.__obj.putvarbound(j_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarbound(self,j_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarbound(self.__nativep,j_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getbound(self,accmode_,i_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res,resargs = self.__obj.getbound(accmode_,i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def chgvarbound(self,j_,lower_,finite_,value_): # 3\n res = self.__obj.chgvarbound(j_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def lookup_var(self, var):\n if var in self.binding:\n return self.binding[var]\n elif self.parent is not None:\n return self.parent.lookup_var(var)\n else:\n raise Environment.Unbound('unbound variable \"%s\"' % var)", "def bound(name):", "def variable_bounds(problem):\n return ([\n problem['state_bounds'][var] if problem['state_bounds'][var] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] - 1)\n for var in range(problem['num_states'])\n ] + [\n problem['input_bounds'][inp] if problem['input_bounds'][inp] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] + 1)\n for inp in range(problem['num_inputs'])\n ]) * problem['Nv'] + ([(0.01, np.inf)] if problem['T'] == 0 else []) \\\n if problem['state_bounds'] is not None else None", "def chgvarbound(self,j_,lower_,finite_,value_):\n res = __library__.MSK_XX_chgvarbound(self.__nativep,j_,lower_,finite_,value_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def incBound(self):\n # increment the bound for the local variables.\n self.incVariableList()\n super().incBound()\n\n # get the last variable.\n idx = len(self.stateNormalPath) - 1\n assert(idx > 0)\n\n # we add the 
constraints that specify the id of the transition\n self.addConstraintOnIdTransition(idx)", "def GetBounds(self, p_int, p_int_1, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates", "def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds", "def var(self,i): # TODO: change to property to access (read only?) 
X?\n return Var(i,self.dims[i])", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def dynamically_bound(*args, **kwargs):\n return DynamicVar(*args, **kwargs)", "def num_vars(self):\n return len(self.bounds.lb)", "def get_bindable_vars(self):\n return self.local_vars.keys() + self.parent.get_bindable_vars()", "def bound_for(self, name):\n if '.' in name:\n module, name = name.split('.', 1)\n if module in self._modules:\n return self.__getattr__(module).bound_for(name)\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no module %s' % (name, type(self).__name__, module))\n else:\n if name in self._parameters:\n return self._bounds[name]\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no parameter %s' % (name, type(self).__name__, module))", "def nvar(self):\n return len(self.__vars)", "def _get_reference_by_variable(self, var):\n if not var[0] == consts.VARIABLE:\n raise Exception('Internal error: Expected a variable, got: \"%r\"' % var)\n res = self._bindings.get(var, var)\n if res == consts.TOPIC_IN_FOCUS:\n res = self.focus\n while res[0] == consts.VARIABLE and self.parent:\n res = self.parent._get_reference_by_variable(res) #pylint: disable-msg=W0212\n if res == consts.TOPIC_IN_FOCUS:\n res = self.focus\n return res", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def vars(self) -> {(EVar, Pool)}:\n raise NotImplementedError()", "def _getBound(self):\n if self._colormap is not None:\n bound = self._getRawBound()\n\n if bound is None:\n bound = self._getColormapRange()[self._index]\n return bound\n else:\n return 1. # Fallback", "def named_parameter_bounds(self):\n for name, _ in self.named_parameters():\n yield name, self.bound_for(name)", "def variable_integrate(self, variable, *bounds):\n try:\n index = list(self.variables).index(variable)\n except ValueError:\n index = None\n if index is not None:\n indef_integral = self.head.integrate_indefinite_index(type(self), self.data, self, index)\n if bounds:\n low, high = bounds\n return indef_integral.variable_subs(variable, high) - indef_integral.variable_subs(variable, low)\n return indef_integral\n raise NotImplementedError(`self.variables, variable, index`)", "def cb_bounds(self, variable, results_dict, keys, fixed_bounds):\n tas_bound, pr_bound = fixed_bounds\n if variable == \"tas\":\n if tas_bound:\n bound_limit = tas_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict, keys)\n cmap = plt.cm.RdBu_r\n else:\n if pr_bound:\n bound_limit = pr_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict,\n keys,\n avg_over=25)\n cmap = plt.cm.BrBG\n bounds = np.linspace(-1 * bound_limit, bound_limit, 11)\n return [bounds, cmap]" ]
[ "0.88780814", "0.6618329", "0.6337327", "0.6297966", "0.6271888", "0.6069767", "0.606333", "0.603762", "0.59093964", "0.5864278", "0.5855671", "0.57791764", "0.5742324", "0.5674004", "0.56727177", "0.56428105", "0.5599474", "0.55750924", "0.5573686", "0.5553733", "0.554883", "0.5534935", "0.54801404", "0.54507244", "0.5406407", "0.53483266", "0.5333609", "0.53182", "0.53138274", "0.53069663" ]
0.8567469
1
Obtains bounds information for a slice of the constraints. getconboundslice(self,first_,last_,bk_,bl_,bu_)
def getconboundslice(self,first_,last_,bk_,bl_,bu_):
  _bk_minlength = ((last_) - (first_))
  if ((last_) - (first_)) > 0 and bk_ is not None and len(bk_) != ((last_) - (first_)):
    raise ValueError("Array argument bk is not long enough: Is %d, expected %d" % (len(bk_),((last_) - (first_))))
  if isinstance(bk_,numpy.ndarray) and not bk_.flags.writeable:
    raise ValueError("Argument bk must be writable")
  if bk_ is not None:
    _bk_tmp = (ctypes.c_int32 * len(bk_))()
  else:
    _bk_tmp = None
  _bl_minlength = ((last_) - (first_))
  if ((last_) - (first_)) > 0 and bl_ is not None and len(bl_) != ((last_) - (first_)):
    raise ValueError("Array argument bl is not long enough: Is %d, expected %d" % (len(bl_),((last_) - (first_))))
  if isinstance(bl_,numpy.ndarray) and not bl_.flags.writeable:
    raise ValueError("Argument bl must be writable")
  if isinstance(bl_, numpy.ndarray) and bl_.dtype is numpy.dtype(numpy.float64) and bl_.flags.contiguous:
    _bl_copyarray = False
    _bl_tmp = ctypes.cast(bl_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  elif bl_ is not None:
    _bl_copyarray = True
    _bl_np_tmp = numpy.zeros(len(bl_),numpy.dtype(numpy.float64))
    _bl_np_tmp[:] = bl_
    assert _bl_np_tmp.flags.contiguous
    _bl_tmp = ctypes.cast(_bl_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  else:
    _bl_copyarray = False
    _bl_tmp = None
  _bu_minlength = ((last_) - (first_))
  if ((last_) - (first_)) > 0 and bu_ is not None and len(bu_) != ((last_) - (first_)):
    raise ValueError("Array argument bu is not long enough: Is %d, expected %d" % (len(bu_),((last_) - (first_))))
  if isinstance(bu_,numpy.ndarray) and not bu_.flags.writeable:
    raise ValueError("Argument bu must be writable")
  if isinstance(bu_, numpy.ndarray) and bu_.dtype is numpy.dtype(numpy.float64) and bu_.flags.contiguous:
    _bu_copyarray = False
    _bu_tmp = ctypes.cast(bu_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  elif bu_ is not None:
    _bu_copyarray = True
    _bu_np_tmp = numpy.zeros(len(bu_),numpy.dtype(numpy.float64))
    _bu_np_tmp[:] = bu_
    assert _bu_np_tmp.flags.contiguous
    _bu_tmp = ctypes.cast(_bu_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))
  else:
    _bu_copyarray = False
    _bu_tmp = None
  res = __library__.MSK_XX_getconboundslice(self.__nativep,first_,last_,_bk_tmp,_bl_tmp,_bu_tmp)
  if res != 0:
    _,msg = self.__getlasterror(res)
    raise Error(rescode(res),msg)
  if bk_ is not None: bk_[:] = [ boundkey(v) for v in _bk_tmp[0:len(bk_)] ]
  if _bl_copyarray:
    bl_[:] = _bl_np_tmp
  if _bu_copyarray:
    bu_[:] = _bu_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconboundslice(self,first_,last_,bk,bl,bu): # 3\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getconboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def getvarboundslice(self,first_,last_,bk,bl,bu): # 3\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getvarboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg 
= self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def getvarboundslice(self,first_,last_,bk_,bl_,bu_):\n _bk_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk is not long enough: Is %d, expected %d\" % (len(bk_),((last_) - (first_))))\n if isinstance(bk_,numpy.ndarray) and not bk_.flags.writeable:\n raise ValueError(\"Argument bk must be writable\")\n if bk_ is not None:\n _bk_tmp = (ctypes.c_int32 * len(bk_))()\n else:\n _bk_tmp = None\n _bl_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl is not long enough: Is %d, expected %d\" % (len(bl_),((last_) - (first_))))\n if isinstance(bl_,numpy.ndarray) and not bl_.flags.writeable:\n raise ValueError(\"Argument bl must be writable\")\n if isinstance(bl_, numpy.ndarray) and bl_.dtype is numpy.dtype(numpy.float64) and bl_.flags.contiguous:\n _bl_copyarray = False\n _bl_tmp = ctypes.cast(bl_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bl_ is not None:\n _bl_copyarray = True\n _bl_np_tmp = numpy.zeros(len(bl_),numpy.dtype(numpy.float64))\n _bl_np_tmp[:] = bl_\n assert _bl_np_tmp.flags.contiguous\n _bl_tmp = ctypes.cast(_bl_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bl_copyarray = False\n _bl_tmp = None\n \n _bu_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu is not long enough: Is %d, expected %d\" % (len(bu_),((last_) - (first_))))\n if isinstance(bu_,numpy.ndarray) and not bu_.flags.writeable:\n raise ValueError(\"Argument bu must be writable\")\n if isinstance(bu_, numpy.ndarray) and bu_.dtype is numpy.dtype(numpy.float64) and bu_.flags.contiguous:\n _bu_copyarray = False\n _bu_tmp = ctypes.cast(bu_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bu_ is not None:\n _bu_copyarray = True\n _bu_np_tmp = numpy.zeros(len(bu_),numpy.dtype(numpy.float64))\n _bu_np_tmp[:] = bu_\n assert _bu_np_tmp.flags.contiguous\n _bu_tmp = ctypes.cast(_bu_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bu_copyarray = False\n _bu_tmp = None\n \n res = __library__.MSK_XX_getvarboundslice(self.__nativep,first_,last_,_bk_tmp,_bl_tmp,_bu_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if bk_ is not None: bk_[:] = [ boundkey(v) for v in _bk_tmp[0:len(bk_)] ]\n if _bl_copyarray:\n bl_[:] = _bl_np_tmp\n if _bu_copyarray:\n bu_[:] = _bu_np_tmp", "def getboundslice(self,accmode_,first_,last_,bk,bl,bu): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is 
None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getboundslice(accmode_,first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def putconboundslice(self,first_,last_,bk,bl,bu): # 3\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putconboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconboundsliceconst(self,first_,last_,bkc_,blc_,buc_):\n res = __library__.MSK_XX_putconboundsliceconst(self.__nativep,first_,last_,bkc_,blc_,buc_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconboundslice(self,first_,last_,bkc_,blc_,buc_):\n _bkc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bkc_ is not None and len(bkc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bkc is not long enough: Is %d, expected %d\" % 
(len(bkc_),((last_) - (first_))))\n if bkc_ is None:\n raise ValueError(\"Argument bkc cannot be None\")\n if bkc_ is None:\n raise ValueError(\"Argument bkc may not be None\")\n if bkc_ is not None:\n _bkc_tmp = (ctypes.c_int32 * len(bkc_))(*bkc_)\n else:\n _bkc_tmp = None\n _blc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and blc_ is not None and len(blc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument blc is not long enough: Is %d, expected %d\" % (len(blc_),((last_) - (first_))))\n if blc_ is None:\n raise ValueError(\"Argument blc cannot be None\")\n if blc_ is None:\n raise ValueError(\"Argument blc may not be None\")\n if isinstance(blc_, numpy.ndarray) and blc_.dtype is numpy.dtype(numpy.float64) and blc_.flags.contiguous:\n _blc_copyarray = False\n _blc_tmp = ctypes.cast(blc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blc_ is not None:\n _blc_copyarray = True\n _blc_np_tmp = numpy.zeros(len(blc_),numpy.dtype(numpy.float64))\n _blc_np_tmp[:] = blc_\n assert _blc_np_tmp.flags.contiguous\n _blc_tmp = ctypes.cast(_blc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blc_copyarray = False\n _blc_tmp = None\n \n _buc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and buc_ is not None and len(buc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument buc is not long enough: Is %d, expected %d\" % (len(buc_),((last_) - (first_))))\n if buc_ is None:\n raise ValueError(\"Argument buc cannot be None\")\n if buc_ is None:\n raise ValueError(\"Argument buc may not be None\")\n if isinstance(buc_, numpy.ndarray) and buc_.dtype is numpy.dtype(numpy.float64) and buc_.flags.contiguous:\n _buc_copyarray = False\n _buc_tmp = ctypes.cast(buc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif buc_ is not None:\n _buc_copyarray = True\n _buc_np_tmp = numpy.zeros(len(buc_),numpy.dtype(numpy.float64))\n _buc_np_tmp[:] = buc_\n assert _buc_np_tmp.flags.contiguous\n _buc_tmp = ctypes.cast(_buc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _buc_copyarray = False\n _buc_tmp = None\n \n res = __library__.MSK_XX_putconboundslice(self.__nativep,first_,last_,_bkc_tmp,_blc_tmp,_buc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putboundslice(self,con_,first_,last_,bk,bl,bu): # 3\n if not isinstance(con_,accmode): raise TypeError(\"Argument con has wrong type\")\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu 
is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putboundslice(con_,first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarboundsliceconst(self,first_,last_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarboundsliceconst(self.__nativep,first_,last_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarboundslice(self,first_,last_,bkx_,blx_,bux_):\n _bkx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bkx_ is not None and len(bkx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bkx is not long enough: Is %d, expected %d\" % (len(bkx_),((last_) - (first_))))\n if bkx_ is None:\n raise ValueError(\"Argument bkx cannot be None\")\n if bkx_ is None:\n raise ValueError(\"Argument bkx may not be None\")\n if bkx_ is not None:\n _bkx_tmp = (ctypes.c_int32 * len(bkx_))(*bkx_)\n else:\n _bkx_tmp = None\n _blx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and blx_ is not None and len(blx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument blx is not long enough: Is %d, expected %d\" % (len(blx_),((last_) - (first_))))\n if blx_ is None:\n raise ValueError(\"Argument blx cannot be None\")\n if blx_ is None:\n raise ValueError(\"Argument blx may not be None\")\n if isinstance(blx_, numpy.ndarray) and blx_.dtype is numpy.dtype(numpy.float64) and blx_.flags.contiguous:\n _blx_copyarray = False\n _blx_tmp = ctypes.cast(blx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blx_ is not None:\n _blx_copyarray = True\n _blx_np_tmp = numpy.zeros(len(blx_),numpy.dtype(numpy.float64))\n _blx_np_tmp[:] = blx_\n assert _blx_np_tmp.flags.contiguous\n _blx_tmp = ctypes.cast(_blx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blx_copyarray = False\n _blx_tmp = None\n \n _bux_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bux_ is not None and len(bux_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bux is not long enough: Is %d, expected %d\" % (len(bux_),((last_) - (first_))))\n if bux_ is None:\n raise ValueError(\"Argument bux cannot be None\")\n if bux_ is None:\n raise ValueError(\"Argument bux may not be None\")\n if isinstance(bux_, numpy.ndarray) and bux_.dtype is numpy.dtype(numpy.float64) and bux_.flags.contiguous:\n _bux_copyarray = False\n _bux_tmp = ctypes.cast(bux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bux_ is not None:\n _bux_copyarray = True\n _bux_np_tmp = numpy.zeros(len(bux_),numpy.dtype(numpy.float64))\n _bux_np_tmp[:] = bux_\n assert _bux_np_tmp.flags.contiguous\n _bux_tmp = ctypes.cast(_bux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bux_copyarray = False\n _bux_tmp = None\n \n res = __library__.MSK_XX_putvarboundslice(self.__nativep,first_,last_,_bkx_tmp,_blx_tmp,_bux_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarboundslice(self,first_,last_,bk,bl,bu): # 3\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is 
None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putvarboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def bounds(self): # -> tuple[()]:\n ...", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def bounds(self):\n return self.substrates.bounds", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def get_bounds():\n return [0.00], [1.00]", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = 
max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]" ]
[ "0.83687973", "0.7650801", "0.7564731", "0.7493438", "0.74723506", "0.7434169", "0.7281272", "0.72708595", "0.69236696", "0.6754937", "0.67351925", "0.6625706", "0.6625706", "0.6625706", "0.6625706", "0.6625706", "0.6625706", "0.6625706", "0.6625706", "0.64139324", "0.6381346", "0.6343208", "0.6342877", "0.63336277", "0.630237", "0.6283226", "0.6239908", "0.6231917", "0.62097055", "0.61555845" ]
0.80538183
1
Obtains bounds information for a slice of the variables. getvarboundslice(self,first_,last_,bk_,bl_,bu_)
def getvarboundslice(self,first_,last_,bk_,bl_,bu_): _bk_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and bk_ is not None and len(bk_) != ((last_) - (first_)): raise ValueError("Array argument bk is not long enough: Is %d, expected %d" % (len(bk_),((last_) - (first_)))) if isinstance(bk_,numpy.ndarray) and not bk_.flags.writeable: raise ValueError("Argument bk must be writable") if bk_ is not None: _bk_tmp = (ctypes.c_int32 * len(bk_))() else: _bk_tmp = None _bl_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and bl_ is not None and len(bl_) != ((last_) - (first_)): raise ValueError("Array argument bl is not long enough: Is %d, expected %d" % (len(bl_),((last_) - (first_)))) if isinstance(bl_,numpy.ndarray) and not bl_.flags.writeable: raise ValueError("Argument bl must be writable") if isinstance(bl_, numpy.ndarray) and bl_.dtype is numpy.dtype(numpy.float64) and bl_.flags.contiguous: _bl_copyarray = False _bl_tmp = ctypes.cast(bl_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif bl_ is not None: _bl_copyarray = True _bl_np_tmp = numpy.zeros(len(bl_),numpy.dtype(numpy.float64)) _bl_np_tmp[:] = bl_ assert _bl_np_tmp.flags.contiguous _bl_tmp = ctypes.cast(_bl_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _bl_copyarray = False _bl_tmp = None _bu_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and bu_ is not None and len(bu_) != ((last_) - (first_)): raise ValueError("Array argument bu is not long enough: Is %d, expected %d" % (len(bu_),((last_) - (first_)))) if isinstance(bu_,numpy.ndarray) and not bu_.flags.writeable: raise ValueError("Argument bu must be writable") if isinstance(bu_, numpy.ndarray) and bu_.dtype is numpy.dtype(numpy.float64) and bu_.flags.contiguous: _bu_copyarray = False _bu_tmp = ctypes.cast(bu_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif bu_ is not None: _bu_copyarray = True _bu_np_tmp = numpy.zeros(len(bu_),numpy.dtype(numpy.float64)) _bu_np_tmp[:] = bu_ assert _bu_np_tmp.flags.contiguous _bu_tmp = ctypes.cast(_bu_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _bu_copyarray = False _bu_tmp = None res = __library__.MSK_XX_getvarboundslice(self.__nativep,first_,last_,_bk_tmp,_bl_tmp,_bu_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if bk_ is not None: bk_[:] = [ boundkey(v) for v in _bk_tmp[0:len(bk_)] ] if _bl_copyarray: bl_[:] = _bl_np_tmp if _bu_copyarray: bu_[:] = _bu_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvarboundslice(self,first_,last_,bk,bl,bu): # 3\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getvarboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def putvarboundsliceconst(self,first_,last_,bkx_,blx_,bux_):\n res = __library__.MSK_XX_putvarboundsliceconst(self.__nativep,first_,last_,bkx_,blx_,bux_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarboundslice(self,first_,last_,bk,bl,bu): # 3\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != 
\"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putvarboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarboundslice(self,first_,last_,bkx_,blx_,bux_):\n _bkx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bkx_ is not None and len(bkx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bkx is not long enough: Is %d, expected %d\" % (len(bkx_),((last_) - (first_))))\n if bkx_ is None:\n raise ValueError(\"Argument bkx cannot be None\")\n if bkx_ is None:\n raise ValueError(\"Argument bkx may not be None\")\n if bkx_ is not None:\n _bkx_tmp = (ctypes.c_int32 * len(bkx_))(*bkx_)\n else:\n _bkx_tmp = None\n _blx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and blx_ is not None and len(blx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument blx is not long enough: Is %d, expected %d\" % (len(blx_),((last_) - (first_))))\n if blx_ is None:\n raise ValueError(\"Argument blx cannot be None\")\n if blx_ is None:\n raise ValueError(\"Argument blx may not be None\")\n if isinstance(blx_, numpy.ndarray) and blx_.dtype is numpy.dtype(numpy.float64) and blx_.flags.contiguous:\n _blx_copyarray = False\n _blx_tmp = ctypes.cast(blx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blx_ is not None:\n _blx_copyarray = True\n _blx_np_tmp = numpy.zeros(len(blx_),numpy.dtype(numpy.float64))\n _blx_np_tmp[:] = blx_\n assert _blx_np_tmp.flags.contiguous\n _blx_tmp = ctypes.cast(_blx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blx_copyarray = False\n _blx_tmp = None\n \n _bux_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bux_ is not None and len(bux_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bux is not long enough: Is %d, expected %d\" % (len(bux_),((last_) - (first_))))\n if bux_ is None:\n raise ValueError(\"Argument bux cannot be None\")\n if bux_ is None:\n raise ValueError(\"Argument bux may not be None\")\n if isinstance(bux_, numpy.ndarray) and bux_.dtype is numpy.dtype(numpy.float64) and bux_.flags.contiguous:\n _bux_copyarray = False\n _bux_tmp = ctypes.cast(bux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bux_ is not None:\n _bux_copyarray = True\n _bux_np_tmp = numpy.zeros(len(bux_),numpy.dtype(numpy.float64))\n _bux_np_tmp[:] = bux_\n assert _bux_np_tmp.flags.contiguous\n _bux_tmp = ctypes.cast(_bux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bux_copyarray = False\n _bux_tmp = None\n \n res = __library__.MSK_XX_putvarboundslice(self.__nativep,first_,last_,_bkx_tmp,_blx_tmp,_bux_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getconboundslice(self,first_,last_,bk,bl,bu): # 3\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = 
memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getconboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def getconboundslice(self,first_,last_,bk_,bl_,bu_):\n _bk_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk is not long enough: Is %d, expected %d\" % (len(bk_),((last_) - (first_))))\n if isinstance(bk_,numpy.ndarray) and not bk_.flags.writeable:\n raise ValueError(\"Argument bk must be writable\")\n if bk_ is not None:\n _bk_tmp = (ctypes.c_int32 * len(bk_))()\n else:\n _bk_tmp = None\n _bl_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl is not long enough: Is %d, expected %d\" % (len(bl_),((last_) - (first_))))\n if isinstance(bl_,numpy.ndarray) and not bl_.flags.writeable:\n raise ValueError(\"Argument bl must be writable\")\n if isinstance(bl_, numpy.ndarray) and bl_.dtype is numpy.dtype(numpy.float64) and bl_.flags.contiguous:\n _bl_copyarray = False\n _bl_tmp = ctypes.cast(bl_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bl_ is not None:\n _bl_copyarray = True\n _bl_np_tmp = numpy.zeros(len(bl_),numpy.dtype(numpy.float64))\n _bl_np_tmp[:] = bl_\n assert _bl_np_tmp.flags.contiguous\n _bl_tmp = ctypes.cast(_bl_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bl_copyarray = False\n _bl_tmp = None\n \n _bu_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu is not long enough: Is %d, expected %d\" % (len(bu_),((last_) - (first_))))\n if isinstance(bu_,numpy.ndarray) and not bu_.flags.writeable:\n raise ValueError(\"Argument bu must be writable\")\n if isinstance(bu_, numpy.ndarray) and bu_.dtype is numpy.dtype(numpy.float64) and bu_.flags.contiguous:\n _bu_copyarray = False\n _bu_tmp = ctypes.cast(bu_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bu_ is not None:\n _bu_copyarray = True\n _bu_np_tmp = numpy.zeros(len(bu_),numpy.dtype(numpy.float64))\n _bu_np_tmp[:] = bu_\n assert _bu_np_tmp.flags.contiguous\n _bu_tmp = ctypes.cast(_bu_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bu_copyarray = False\n _bu_tmp = None\n \n res = 
__library__.MSK_XX_getconboundslice(self.__nativep,first_,last_,_bk_tmp,_bl_tmp,_bu_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if bk_ is not None: bk_[:] = [ boundkey(v) for v in _bk_tmp[0:len(bk_)] ]\n if _bl_copyarray:\n bl_[:] = _bl_np_tmp\n if _bu_copyarray:\n bu_[:] = _bu_np_tmp", "def getboundslice(self,accmode_,first_,last_,bk,bl,bu): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n _copyback_bk = False\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n _copyback_bk = True\n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n _copyback_bk = True\n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n _copyback_bl = False\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n _copyback_bl = True\n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n _copyback_bl = True\n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n _copyback_bu = False\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n _copyback_bu = True\n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n _copyback_bu = True\n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.getboundslice(accmode_,first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_bu:\n bu[:] = _tmparr_bu\n if _copyback_bl:\n bl[:] = _tmparr_bl\n if _copyback_bk:\n for __tmp_var_0 in range(len(bk_)): bk[__tmp_var_0] = boundkey(_tmparr_bk[__tmp_var_0])", "def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds", "def getvarbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getvarbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n _bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def putconboundslice(self,first_,last_,bk,bl,bu): # 3\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not 
None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putconboundslice(first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def getvarbound(self,i_): # 3\n res,resargs = self.__obj.getvarbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def putboundslice(self,con_,first_,last_,bk,bl,bu): # 3\n if not isinstance(con_,accmode): raise TypeError(\"Argument con has wrong type\")\n if bk is None: raise TypeError(\"Invalid type for argument bk\")\n if bk is None:\n bk_ = None\n else:\n try:\n bk_ = memoryview(bk)\n except TypeError:\n try:\n _tmparr_bk = array.array(\"i\",bk)\n except TypeError:\n raise TypeError(\"Argument bk has wrong type\")\n else:\n bk_ = memoryview(_tmparr_bk)\n \n else:\n if bk_.format != \"i\":\n bk_ = memoryview(array.array(\"i\",bk))\n \n if bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk has wrong length\")\n if bl is None: raise TypeError(\"Invalid type for argument bl\")\n if bl is None:\n bl_ = None\n else:\n try:\n bl_ = memoryview(bl)\n except TypeError:\n try:\n _tmparr_bl = array.array(\"d\",bl)\n except TypeError:\n raise TypeError(\"Argument bl has wrong type\")\n else:\n bl_ = memoryview(_tmparr_bl)\n \n else:\n if bl_.format != \"d\":\n bl_ = memoryview(array.array(\"d\",bl))\n \n if bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl has wrong length\")\n if bu is None: raise TypeError(\"Invalid type for argument bu\")\n if bu is None:\n bu_ = None\n else:\n try:\n bu_ = memoryview(bu)\n except TypeError:\n try:\n _tmparr_bu = array.array(\"d\",bu)\n except TypeError:\n raise TypeError(\"Argument bu has wrong type\")\n else:\n bu_ = memoryview(_tmparr_bu)\n \n else:\n if bu_.format != \"d\":\n bu_ = memoryview(array.array(\"d\",bu))\n \n if bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu has wrong length\")\n res = self.__obj.putboundslice(con_,first_,last_,bk_,bl_,bu_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_bounds():\n 
return [0.00], [1.00]", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def bounds(self, pos):", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def variable_bounds(problem):\n return ([\n problem['state_bounds'][var] if problem['state_bounds'][var] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] - 1)\n for var in range(problem['num_states'])\n ] + [\n problem['input_bounds'][inp] if problem['input_bounds'][inp] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] + 1)\n for inp in range(problem['num_inputs'])\n ]) * problem['Nv'] + ([(0.01, np.inf)] if problem['T'] == 0 else []) \\\n if problem['state_bounds'] is not None else None", "def cb_bounds(self, variable, results_dict, keys, fixed_bounds):\n tas_bound, pr_bound = fixed_bounds\n if variable == \"tas\":\n if tas_bound:\n bound_limit = tas_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict, keys)\n cmap = plt.cm.RdBu_r\n else:\n if pr_bound:\n bound_limit = pr_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict,\n keys,\n avg_over=25)\n cmap = plt.cm.BrBG\n bounds = np.linspace(-1 * bound_limit, bound_limit, 11)\n return [bounds, cmap]", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)" ]
[ "0.8427269", "0.76399803", "0.7581138", "0.72763824", "0.7077902", "0.6877357", "0.67139685", "0.65998864", "0.64918834", "0.63755333", "0.63566226", "0.6347929", "0.63300616", "0.62415", "0.6238382", "0.62190634", "0.62190634", "0.62190634", "0.62190634", "0.62190634", "0.62190634", "0.62190634", "0.62190634", "0.61316663", "0.6107121", "0.61058193", "0.6092169", "0.60883737", "0.6042942", "0.60274386" ]
0.8042054
1
Obtains one objective coefficient. getcj(self,j_)
def getcj(self,j_): cj_ = ctypes.c_double() res = __library__.MSK_XX_getcj(self.__nativep,j_,ctypes.byref(cj_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) cj_ = cj_.value _cj_return_value = cj_ return (_cj_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getcj(self,j_): # 3\n res,resargs = self.__obj.getcj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _cj_return_value = resargs\n return _cj_return_value", "def jac(self):\n return self.unit_jac if self._jac is None else self._jac", "def getCoefficient(self):\n return _libsbml.FluxObjective_getCoefficient(self)", "def J(self):\n return self._J", "def J(self) -> float:\n return self._J", "def jaccard_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + b + c)", "def _get_jacobian(self):\n srcs, recs = self.srcs, self.recs\n if not self.sparse:\n jac = numpy.array(\n [ttime2d.straight([cell], '', srcs, recs, velocity=1.)\n for cell in self.mesh]).T\n else:\n shoot = ttime2d.straight\n nonzero = []\n extend = nonzero.extend\n for j, c in enumerate(self.mesh):\n extend((i, j, tt)\n for i, tt in enumerate(shoot([c], '', srcs, recs,\n velocity=1.))\n if tt != 0)\n row, col, val = numpy.array(nonzero).T\n shape = (self.ndata, self.nparams)\n jac = scipy.sparse.csr_matrix((val, (row, col)), shape)\n return jac", "def jval(self):\n return self.q * self.model.nobs_moms", "def j_nc_from_j(self, j, inverse=False, check_bounds=False):\n if not inverse:\n if check_bounds:\n assert_between(j, 0, self._grid_shape[1]-1)\n j_nc = self._nc_j0 + j * self._nc_jskip\n if check_bounds:\n assert_between(j_nc, 0, self._nc_xdim)\n return j_nc\n else:\n j_nc = j\n if check_bounds:\n assert_between(j_nc, 0, self._nc_ydim)\n j = (j_nc - self._nc_j0)/self._nc_jskip\n if check_bounds:\n assert_between(j, 0, self._grid_shape[1]-1)\n return j", "def GetJ(self, *args):\n return _table.Table_GetJ(self, *args)", "def c(self,j,i_j):\n \"\"\" The index j of the chains goes from 0 to k-1 (where k is the \n number of chains in our decomposition \"\"\"\n assert j < len(self.lcd), \"j must be the index of a chain\"\n \"\"\" The index i_j goes from 0 to len(lcd[j]) this range is one longer\n than the length of the chain because we go from {} to the full chain. \"\"\"\n assert i_j <= self.lcd_dims[j], \"i_j = {}, dims[j] = {}\".format(i_j, self.lcd_dims[j])\n if i_j == 0:\n return None\n else:\n return self.lcd[j][i_j-1]", "def objective(self):\n return self._objective", "def compute_j(self, trajectory):\r\n J = 0\r\n for i, (_,_,r,_) in enumerate(trajectory):\r\n J += (self.domain.discount**i) * r\r\n return J", "def doublec(self, j):\n\t\tif j < (self.k0 + 1):\n\t\t\treturn 0\n\t\tif (self.b[j] != self.b[j-1]):\n\t\t\treturn 0\n\t\treturn self.cons(j)", "def C_ret(img):\r\n return solveJ(img, 0, tc(img, ignore_ch=2))", "def coefficients_from_j(j, minimal_twist=True):\n try:\n K = j.parent()\n except AttributeError:\n K = rings.RationalField()\n if K not in _Fields:\n K = K.fraction_field()\n\n char = K.characteristic()\n if char == 2:\n if j == 0:\n return Sequence([0, 0, 1, 0, 0], universe=K)\n else:\n return Sequence([1, 0, 0, 0, 1/j], universe=K)\n if char == 3:\n if j == 0:\n return Sequence([0, 0, 0, 1, 0], universe=K)\n else:\n return Sequence([0, j, 0, 0, -j**2], universe=K)\n\n if K is rings.RationalField():\n # we construct the minimal twist, i.e. 
the curve with minimal\n # conductor with this j_invariant:\n if j == 0:\n return Sequence([0, 0, 1, 0, 0], universe=K) # 27a3\n if j == 1728:\n return Sequence([0, 0, 0, -1, 0], universe=K) # 32a2\n\n if not minimal_twist:\n k = j-1728\n return Sequence([0, 0, 0, -3*j*k, -2*j*k**2], universe=K)\n\n n = j.numerator()\n m = n-1728*j.denominator()\n a4 = -3*n*m\n a6 = -2*n*m**2\n\n # Now E=[0,0,0,a4,a6] has j-invariant j=n/d\n from sage.sets.set import Set\n for p in Set(n.prime_divisors()+m.prime_divisors()):\n e = min(a4.valuation(p)//2, a6.valuation(p)//3)\n if e & gt\n 0:\n p = p**e\n a4 /= p**2\n a6 /= p**3\n\n # Now E=[0,0,0,a4,a6] is minimal at all p != 2,3\n tw = [-1, 2, -2, 3, -3, 6, -6]\n E1 = EllipticCurve([0, 0, 0, a4, a6])\n Elist = [E1] + [E1.quadratic_twist(t) for t in tw]\n Elist.sort(key=lambda E: E.conductor())\n return Sequence(Elist[0].ainvs())\n\n # defaults for all other fields:\n if j == 0:\n return Sequence([0, 0, 0, 0, 1], universe=K)\n if j == 1728:\n return Sequence([0, 0, 0, 1, 0], universe=K)\n k = j-1728\n return Sequence([0, 0, 0, -3*j*k, -2*j*k**2], universe=K)", "def jacobian(self, c):\n\n raise NotImplementedError", "def coefficient(self) -> float:\n ...", "def putcj(self,j_,cj_): # 3\n res = self.__obj.putcj(j_,cj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def jacobian_c(self, x, out=None, **kwargs):\n return empty_matrix(0, self.nx)", "def JCoeff(l, m, s, lp, mp, sp):\n coeff = int((l == lp) & (m == -mp) & (s == sp))*1j*(-1)**(int(m-1/2))*s\n return coeff", "def getaij(self,i_,j_): # 3\n res,resargs = self.__obj.getaij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _aij_return_value = resargs\n return _aij_return_value", "def conj(self, o): \n return (o.inv()) * self * o", "def get_jac(wcs, cenx, ceny):\n import galsim\n\n crpix = wcs.crpix\n galsim_pos = galsim.PositionD(x=crpix[0], y=crpix[1])\n\n galsim_jac = wcs.jacobian(image_pos=galsim_pos)\n\n return ngmix.Jacobian(\n x=cenx,\n y=ceny,\n dudx=galsim_jac.dudx,\n dudy=galsim_jac.dudy,\n dvdx=galsim_jac.dvdx,\n dvdy=galsim_jac.dvdy,\n )", "def objective(self):\n pass", "def convolved_j0(self, _x, delta_x):\n return (\n j0(_x - 0.5 * delta_x) +\n 4.*j0(_x) +\n j0(_x + 0.5 * delta_x)\n ) / 6.", "def _partial_min_solution(self, j):\n beta_without_j = np.delete(self.betas, j, axis=0)\n X_without_j = np.delete(self.X, j, axis=0)\n X_j = self.X[j] # these are the X values for the jth feature in the model\n # Make predictions and obtain residuals on the full set of Ys, without the effect of the jth predictor included\n R_without_j = (self.Y - (beta_without_j.T @ X_without_j))\n c_j = 2/self.n * (X_j @ R_without_j) # This quantity is described in the notes\n # The following if statements are due to the subgradient of the L1 penality\n if abs(c_j) <= self.lam: # this step is what causes the lasso to shrink coefficients to 0 based on lambda\n return 0\n a_j = 2 * sum(X_j**2) # also described in notes\n if c_j < -self.lam:\n return (c_j + self.lam) / (a_j / self.n)\n elif c_j > self.lam:\n return (c_j - self.lam) / (a_j / self.n)", "def jacobian_c(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_c(x, out=out, **kwargs)", "def getaij(self,i_,j_):\n aij_ = ctypes.c_double()\n res = __library__.MSK_XX_getaij(self.__nativep,i_,j_,ctypes.byref(aij_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n aij_ = aij_.value\n _aij_return_value = aij_\n return (_aij_return_value)", 
"def coeff(self):\n return self._coeff" ]
[ "0.7897348", "0.694601", "0.649423", "0.64774793", "0.6358544", "0.6241598", "0.62322664", "0.62282366", "0.61798847", "0.6176942", "0.60854423", "0.6068824", "0.60608375", "0.6059429", "0.60535663", "0.6037667", "0.60261023", "0.6021581", "0.6002902", "0.5994126", "0.5985048", "0.5962831", "0.5952473", "0.58765996", "0.5872884", "0.58555174", "0.58223677", "0.5809507", "0.58034927", "0.5784401" ]
0.7652591
1
Obtains the fixed term in the objective. getcfix(self)
def getcfix(self): cfix_ = ctypes.c_double() res = __library__.MSK_XX_getcfix(self.__nativep,ctypes.byref(cfix_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) cfix_ = cfix_.value _cfix_return_value = cfix_ return (_cfix_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getcfix(self): # 3\n res,resargs = self.__obj.getcfix()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _cfix_return_value = resargs\n return _cfix_return_value", "def getCF(self):\r\n return self.lEq;", "def CFL(self):\n return self.__CFL", "def fixed(self):\n return self.f_fixed().m_fixed()", "def fcp(self, var='x'):\n return self.charpoly(var).factor()", "def fx(self) -> float:\n\n return self.intrinsic_matrix[0, 0]", "def getCoefficient(self):\n return _libsbml.FluxObjective_getCoefficient(self)", "def fixed_cost(self):\n return np.einsum('i->', self.c[self.f])", "def tctfdfc(x):\n if isinstance(x,Fdf) :\n pass\n else : \n x = Fdf.constant(x)\n return x", "def cf(self):\n if hasattr(self, \"_cf_cache\"):\n return self._cf_cache\n return np.array([conf.cf for conf in self.configurations], dtype=int)", "def cpf(self):\n return self._cpf", "def get_fx(self):\n return self.fx[:self.nump, :]", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def couleur_fond(self):\n return self.fond * self.ka", "def get_icdf(self, xx):\n return self.parent.ppf(xx)", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def pareto_front_cut(self):\n return self.NDA([kernel.objective_values for kernel in self.kernels \\\n if kernel.objective_values is not None],\n self.reference_point)", "def C(self, y, x):\n return self.minor(y,x).det()*(-1.0)**(y+x+2.0)", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def cdf(self, x):\n from scipy.special import betainc\n sq_x = x * x\n return np.where(\n sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),\n np.ones_like(x))", "def getCDF(self):\n return self.cdfSample", "def getTraitCovarFun(self, term_i):\n assert term_i < self.n_randEffs, 'VarianceDecomposition:: specied term out of range'\n return self.trait_covars[term_i]", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def cdf(self, x):\n\n if type(x) is np.float64:\n x = np.array([x])\n\n ndx = [np.argmin(np.abs(self.xs - x[i])) 
for i in range(x.size)]\n\n return self.ys[ndx]", "def cdf(self,x):\n if self.base == 'natural':\n cdfValue = (math.log(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n else:\n cdfValue = (math.log10(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n return cdfValue", "def n_cf(self):\n return self._configurations[0].n_cf", "def f_vector(self):\n try:\n return self._f_vector\n except AttributeError:\n self._f_vector = vector(ZZ,[len(x) for x in self.face_lattice().level_sets()])\n return self._f_vector", "def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))", "def xcoeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n return 0.5\n elif b == n or c == n:\n return -1.0\n elif cov > 0.0:\n return _div(cov, min(p1 * q2, p2 * q1))\n elif cov < 0.0:\n return _div(cov, min(n * c, n * b))\n else:\n return 0.0" ]
[ "0.7102011", "0.69387436", "0.61814743", "0.61706793", "0.6118419", "0.6102831", "0.60178024", "0.59100634", "0.58950984", "0.5895022", "0.5848309", "0.58352464", "0.5788822", "0.5725575", "0.57214016", "0.56842697", "0.56441844", "0.56353074", "0.56272084", "0.56272084", "0.5606963", "0.5579843", "0.55653703", "0.55517346", "0.55301386", "0.5513291", "0.5496895", "0.54896843", "0.5489501", "0.5454264" ]
0.723974
0
Obtains a cone. getcone(self,k_,submem_)
def getcone(self,k_,submem_): ct_ = ctypes.c_int32() conepar_ = ctypes.c_double() nummem_ = ctypes.c_int32() _submem_minlength = self.getconeinfo((k_))[2] if self.getconeinfo((k_))[2] > 0 and submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]: raise ValueError("Array argument submem is not long enough: Is %d, expected %d" % (len(submem_),self.getconeinfo((k_))[2])) if isinstance(submem_,numpy.ndarray) and not submem_.flags.writeable: raise ValueError("Argument submem must be writable") if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous: _submem_copyarray = False _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif submem_ is not None: _submem_copyarray = True _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32)) _submem_np_tmp[:] = submem_ assert _submem_np_tmp.flags.contiguous _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _submem_copyarray = False _submem_tmp = None res = __library__.MSK_XX_getcone(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_),_submem_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _ct_return_value = conetype(ct_.value) conepar_ = conepar_.value _conepar_return_value = conepar_ nummem_ = nummem_.value _nummem_return_value = nummem_ if _submem_copyarray: submem_[:] = _submem_np_tmp return (_ct_return_value,_conepar_return_value,_nummem_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def getconeinfo(self,k_): # 3\n res,resargs = self.__obj.getconeinfo(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def getconeinfo(self,k_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconeinfo(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def putcone(self,k_,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.putcone(k_,ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcone(self,k_,ct_,conepar_,submem_):\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem_)\n elif nummem_ != len(submem_):\n raise IndexError(\"Inconsistent length of array submem\")\n if submem_ is None:\n raise ValueError(\"Argument submem cannot be None\")\n if submem_ is None:\n raise ValueError(\"Argument submem may not be None\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n 
assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_putcone(self.__nativep,k_,ct_,conepar_,nummem_,_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def cone(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True, degree:\n Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio: Union[float,\n bool]=2.0, nodeState: Union[int, bool]=0, pivot: Union[List[float, float, float],\n bool]=None, radius: Union[float, bool]=1.0, sections: Union[int, bool]=8, spans:\n Union[int, bool]=1, startSweep: Union[float, bool]=0, tolerance: Union[float,\n bool]=0.01, useOldInitBehaviour: bool=False, useTolerance: bool=False,\n constructionHistory: bool=True, name: AnyStr=\"\", object: bool=True, polygon: int=0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def appendcone(self,ct_,conepar_,submem_):\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem_)\n elif nummem_ != len(submem_):\n raise IndexError(\"Inconsistent length of array submem\")\n if submem_ is None:\n raise ValueError(\"Argument submem cannot be None\")\n if submem_ is None:\n raise ValueError(\"Argument submem may not be None\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_appendcone(self.__nativep,ct_,conepar_,nummem_,_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getnumcone(self): # 3\n res,resargs = self.__obj.getnumcone()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numcone_return_value = resargs\n return _numcone_return_value", "def getnumcone(self):\n numcone_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumcone(self.__nativep,ctypes.byref(numcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numcone_ = numcone_.value\n _numcone_return_value = numcone_\n return (_numcone_return_value)", "def appendcone(self,ct_,conepar_,submem): # 3\n if not isinstance(ct_,conetype): raise TypeError(\"Argument ct has wrong type\")\n nummem_ = None\n if nummem_ is None:\n nummem_ = len(submem)\n elif nummem_ != len(submem):\n raise IndexError(\"Inconsistent length of array submem\")\n if nummem_ is None: nummem_ = 0\n if submem is None: raise TypeError(\"Invalid type for argument submem\")\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n \n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n \n res = self.__obj.appendcone(ct_,conepar_,nummem_,submem_)\n if res != 0:\n result,msg = 
self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getnumconemem(self,k_): # 3\n res,resargs = self.__obj.getnumconemem(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nummem_return_value = resargs\n return _nummem_return_value", "def cone(df, mu, N, alphacov=2.0):\r\n return alphacov / ((N + 1.3)**2 + mu)", "def cget(self, *args, **kwargs):\n return self._canvas.cget(*args, **kwargs)", "def caget(PV):\n return epics.caget(PV)", "def getC(self):\n\t\treturn self.c", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def get_coft(self,n,m):\n if self.aggregate is None:\n \n return self.CC.get_coft(n,m)\n \n else:\n \n bn = self.aggregate.which_band[n]\n bm = self.aggregate.which_band[m]\n \n if ((bn == 0) and (bm == 0)):\n \n #print(bn,\"::\",n,m)\n return self.CC._cofts[0,:]\n \n elif ((bn == 1) and (bm == 1)):\n #print(bn,\"::\",n-1,m-1)\n \n return self.CC.get_coft(n-1,m-1)\n \n else:\n \n return self.CC._cofts[0,:]", "def getnumconemem(self,k_):\n nummem_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumconemem(self.__nativep,k_,ctypes.byref(nummem_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n return (_nummem_return_value)", "def create_cone(base=1.0, height=1.0, lat=20, lng=20, color=COLOR_WHITE):\n if lat >= 3 and lng >= 10:\n circlebase = create_circle(base - 0.05, 0.1, [0.0, 0.0, -1.0], color)\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidCone(base, height, lat, lng)\n except:\n if not _ERRS[3]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidCone\")\n _ERRS[3] = True\n glCallList(circlebase)\n glPopMatrix()\n glEndList()\n return obj\n else:\n raise Exception(\n \"La latitud y longitud de la figura deben ser mayores a 3\")", "def aveEy2CC(self):\n if self.dim < 2:\n return None\n if getattr(self, '_aveEy2CC', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n if(self.dim == 2):\n self._aveEy2CC = sp.kron(speye(n[1]), av(n[0]))\n elif(self.dim == 3):\n self._aveEy2CC = kron3(av(n[2]), speye(n[1]), av(n[0]))\n return self._aveEy2CC", "def con_ceq(x,project):\n \n cons = project.con_ceq(x)\n \n if cons: cons = array(cons)\n else: cons = zeros([0])\n \n return cons", "def cone(individual, position, height, width):\n value = 0.0\n for x, p in zip(individual, position):\n value += (x - p)**2\n return height - width * math.sqrt(value)", "def forward_cone(self, p):\n return to_rec(zip(p, self.top))", "def ppl_positive_cone(n):\n gs = ppl.Generator_System(ppl_zero_point(n))\n l = [0]*n\n for i in range(n):\n gs.insert(ppl.ray(ppl.Variable(i)))\n return ppl.C_Polyhedron(gs)", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def appendconeseq(self,ct_,conepar_,nummem_,j_):\n res = __library__.MSK_XX_appendconeseq(self.__nativep,ct_,conepar_,nummem_,j_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getqconk(self,k_,qcsubi,qcsubj,qcval): # 3\n maxnumqcnz_ = self.getnumqconknz((k_))\n if qcsubi is None: raise TypeError(\"Invalid type for argument qcsubi\")\n _copyback_qcsubi = False\n if qcsubi is None:\n qcsubi_ = None\n else:\n try:\n qcsubi_ = memoryview(qcsubi)\n except TypeError:\n try:\n _tmparr_qcsubi = array.array(\"i\",qcsubi)\n except TypeError:\n raise 
TypeError(\"Argument qcsubi has wrong type\")\n else:\n qcsubi_ = memoryview(_tmparr_qcsubi)\n _copyback_qcsubi = True\n else:\n if qcsubi_.format != \"i\":\n qcsubi_ = memoryview(array.array(\"i\",qcsubi))\n _copyback_qcsubi = True\n if qcsubi_ is not None and len(qcsubi_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubi has wrong length\")\n if qcsubj is None: raise TypeError(\"Invalid type for argument qcsubj\")\n _copyback_qcsubj = False\n if qcsubj is None:\n qcsubj_ = None\n else:\n try:\n qcsubj_ = memoryview(qcsubj)\n except TypeError:\n try:\n _tmparr_qcsubj = array.array(\"i\",qcsubj)\n except TypeError:\n raise TypeError(\"Argument qcsubj has wrong type\")\n else:\n qcsubj_ = memoryview(_tmparr_qcsubj)\n _copyback_qcsubj = True\n else:\n if qcsubj_.format != \"i\":\n qcsubj_ = memoryview(array.array(\"i\",qcsubj))\n _copyback_qcsubj = True\n if qcsubj_ is not None and len(qcsubj_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubj has wrong length\")\n if qcval is None: raise TypeError(\"Invalid type for argument qcval\")\n _copyback_qcval = False\n if qcval is None:\n qcval_ = None\n else:\n try:\n qcval_ = memoryview(qcval)\n except TypeError:\n try:\n _tmparr_qcval = array.array(\"d\",qcval)\n except TypeError:\n raise TypeError(\"Argument qcval has wrong type\")\n else:\n qcval_ = memoryview(_tmparr_qcval)\n _copyback_qcval = True\n else:\n if qcval_.format != \"d\":\n qcval_ = memoryview(array.array(\"d\",qcval))\n _copyback_qcval = True\n if qcval_ is not None and len(qcval_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcval has wrong length\")\n res,resargs = self.__obj.getqconk64(k_,maxnumqcnz_,len(qcsubi),qcsubi_,qcsubj_,qcval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n if _copyback_qcval:\n qcval[:] = _tmparr_qcval\n if _copyback_qcsubj:\n qcsubj[:] = _tmparr_qcsubj\n if _copyback_qcsubi:\n qcsubi[:] = _tmparr_qcsubi\n return _numqcnz_return_value", "def conj(self, o): \n return (o.inv()) * self * o", "def _k_coaffine_pair(pair, bound=math.inf):\n g = pair.graph\n sigma = pair.coaffination\n kg = clique_graph(g, bound)\n coaf_k = dict([])\n for q in kg:\n coaf_k[q] = Clique([sigma[x] for x in q])\n return CoaffinePair(kg, coaf_k)", "def aveEx2CC(self):\n if getattr(self, '_aveEx2CC', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n if(self.dim == 1):\n self._aveEx2CC = speye(n[0])\n elif(self.dim == 2):\n self._aveEx2CC = sp.kron(av(n[1]), speye(n[0]))\n elif(self.dim == 3):\n self._aveEx2CC = kron3(av(n[2]), av(n[1]), speye(n[0]))\n return self._aveEx2CC" ]
[ "0.85851794", "0.74542636", "0.6922491", "0.6714365", "0.6663073", "0.6131412", "0.61234295", "0.60180366", "0.59645844", "0.5906362", "0.5844101", "0.55410284", "0.5464269", "0.5432204", "0.54281247", "0.54012233", "0.5308824", "0.5305274", "0.5274296", "0.52731514", "0.5224948", "0.5197121", "0.51587415", "0.51538336", "0.51530814", "0.5150834", "0.5145124", "0.51368", "0.51285815", "0.5122779" ]
0.783578
1
Obtains information about a cone. getconeinfo(self,k_)
def getconeinfo(self,k_): ct_ = ctypes.c_int32() conepar_ = ctypes.c_double() nummem_ = ctypes.c_int32() res = __library__.MSK_XX_getconeinfo(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _ct_return_value = conetype(ct_.value) conepar_ = conepar_.value _conepar_return_value = conepar_ nummem_ = nummem_.value _nummem_return_value = nummem_ return (_ct_return_value,_conepar_return_value,_nummem_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconeinfo(self,k_): # 3\n res,resargs = self.__obj.getconeinfo(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def get_coulomb_info(self):\n return", "def cfInfo(self, key):\n\n return self.execute_command(self.CF_INFO, key)", "def cainfo(PV):\n return epics.cainfo(PV, False)", "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def getnumcone(self): # 3\n res,resargs = self.__obj.getnumcone()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numcone_return_value = resargs\n return _numcone_return_value", "def getnumcone(self):\n numcone_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumcone(self.__nativep,ctypes.byref(numcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numcone_ = numcone_.value\n _numcone_return_value = numcone_\n return (_numcone_return_value)", "def get_cp_info(self):\n return self.get(COMMAND_CPM, 'GetCpInfo')", "def getC(self):\n\t\treturn self.c", "def getInfo():", "def mychem_info(self):\n return self._mychem_info", "def get_info(self):\n pass", "def get_info(self):\n pass", "def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt", "def ckn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ckn\")", "def getnumconemem(self,k_): # 3\n res,resargs = self.__obj.getnumconemem(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nummem_return_value = resargs\n return _nummem_return_value", "def get_info(self):\n return \"TODO !\"", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def getcone(self,k_,submem_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n _submem_minlength = self.getconeinfo((k_))[2]\n if self.getconeinfo((k_))[2] > 0 and submem_ is not None and len(submem_) 
!= self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem is not long enough: Is %d, expected %d\" % (len(submem_),self.getconeinfo((k_))[2]))\n if isinstance(submem_,numpy.ndarray) and not submem_.flags.writeable:\n raise ValueError(\"Argument submem must be writable\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_getcone(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_),_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n if _submem_copyarray:\n submem_[:] = _submem_np_tmp\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def _general_get_information(self, k=None):\n ## Format k\n ks = self.get_k(k)\n idx_ks = self._get_k_indices(ks)\n ## Get iss\n iss = self.iss\n ## Format idxs\n assert(type(idx_ks) == list)\n neighs = self.get_neighs(idx_ks)\n sp_relative_pos = self.get_sp_rel_pos(idx_ks)\n self.check_output_standards(neighs, sp_relative_pos, ks, iss)\n# print '3'*50, neighs, sp_relative_pos, ks, iss\n return neighs, sp_relative_pos, ks, iss", "def info(self):", "def info(self):", "def get_customer_information(self):\n return self._request_json(\"/api/consumer\")[\"consumer\"]", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def info(self) -> dict:", "def get_info():\r\n app = application.Application()\r\n\r\n app.start(r\"C:\\\\AL50022\\\\Circ\\\\bin\\\\Circ.exe\")\r\n\r\n app.Circ.menu_select(\"View\")", "def closeness_centrality(self):\n try:\n self.logger.info('正在计算网络的接近中心性 ...')\n return self.order_dict(nx.closeness_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))", "def getInfo(self):\n return self.info", "def get_info(self):\n return None", "def info(self):\n print 'A= ', self.application\n print 'C= ', self.city\n print 'D= ', self.dataset.shape" ]
[ "0.88562346", "0.65607196", "0.6062003", "0.6057762", "0.6046421", "0.6028648", "0.57615036", "0.57012206", "0.5669752", "0.56304246", "0.5603641", "0.5487404", "0.5487404", "0.53959966", "0.53923273", "0.53695613", "0.5355254", "0.5324863", "0.531304", "0.5291991", "0.5281199", "0.5281199", "0.5227771", "0.5225178", "0.5184261", "0.5177437", "0.5126269", "0.5116734", "0.5110722", "0.51098424" ]
0.85250115
1
Obtains a sequence of coefficients from the objective. getclist(self,subj_,c_)
def getclist(self,subj_,c_): num_ = None if num_ is None: num_ = len(subj_) elif num_ != len(subj_): raise IndexError("Inconsistent length of array subj") if subj_ is None: raise ValueError("Argument subj cannot be None") if subj_ is None: raise ValueError("Argument subj may not be None") if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous: _subj_copyarray = False _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subj_ is not None: _subj_copyarray = True _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32)) _subj_np_tmp[:] = subj_ assert _subj_np_tmp.flags.contiguous _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subj_copyarray = False _subj_tmp = None _c_minlength = (num_) if (num_) > 0 and c_ is not None and len(c_) != (num_): raise ValueError("Array argument c is not long enough: Is %d, expected %d" % (len(c_),(num_))) if isinstance(c_,numpy.ndarray) and not c_.flags.writeable: raise ValueError("Argument c must be writable") if c_ is None: raise ValueError("Argument c may not be None") if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous: _c_copyarray = False _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif c_ is not None: _c_copyarray = True _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64)) _c_np_tmp[:] = c_ assert _c_np_tmp.flags.contiguous _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _c_copyarray = False _c_tmp = None res = __library__.MSK_XX_getclist(self.__nativep,num_,_subj_tmp,_c_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if _c_copyarray: c_[:] = _c_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coefficients(self) :\n return self.__coefficients", "def coefficients(self):\n return self._coefficients", "def coefficients(self):\n return self._coefficients", "def coefficients(self) :\n raise NotImplementedError", "def coefficients(self):\r\n return self.coef_['x']", "def get_coefficients(self):\n return self.coefficients", "def get_coefficients(self):\n return self.coefficients", "def coefficients(self) -> np.ndarray:\n return self._coefficients", "def getListOfCompartments(self, *args):\n return _libsbml.Model_getListOfCompartments(self, *args)", "def get_C(self, observation, area):\n if observation:\n C = []\n for Theta in area.Thetas:\n C.append(Theta @ self.Omega)\n else:\n C = [self.Omega]\n return C", "def _clist(slist):\n retList = []\n if slist == None:\n return retList\n for p in slist:\n aobj = {}\n for prop in p.allProperties():\n if prop in IGNORED_PROPS:\n continue\n tmpval = p.valueForProperty_(prop)\n if type(tmpval) == ABMultiValueCoreDataWrapper:\n aval = [(_getVal(tmpval.labelAtIndex_(i)),\n _getVal(tmpval.valueAtIndex_(i)))\n for i in range(0, tmpval.count())]\n else:\n aval = _getVal(tmpval)\n if aval is not None:\n aobj[prop.lower()] = aval\n retList.append(aobj)\n return retList", "def get_base_coefs(mv):\n\trs = []\n\tfor bs in bases:\n\t\tt = []\n\t\tfor b in bs:\n\t\t\tt.append(mv.coef(b))\n\t\t\t\t\t\n\t\trs.append(t)\t\t\n\treturn rs", "def get_coefficients(poles):\n\n poles = np.array(poles)\n s = sp.symbols('s')\n poly = 1\n for s_i in poles:\n poly = (s - s_i) * poly\n poly = poly.expand()\n\n # calculate the coefficient of characteristic polynomial\n n = len(poles)\n p = []\n for i in range(n):\n p.append(poly.subs([(s, 0)]))\n poly = poly - p[i]\n poly = poly / s\n poly = poly.expand()\n\n # convert numbers and complex objects from multiplication to a complex number\n p = [complex(x) for x in p]\n # if imaginary part if greater than the boundary, then set imaginary part null\n boundary = 1e-12\n for idx, val in enumerate(p):\n val = complex(val)\n if abs(val.imag) > boundary:\n msg = \"Imaginary Part of the coefficient p[\" + \\\n str(idx) + \"] is not null (\" + str(val.imag) + \") for a given boundary of \" + \\\n str(boundary)\n warnings.warn(msg)\n p[idx] = val.real\n\n return np.array([p], dtype=float)", "def list_coefficients(self, format='components', fd=True, pos=True, neg=True, printimag=False, norm_neg=True, nmin=0, nmax=0, latex=False, nd=0, Lv=False, prime=False):\n M = self._space\n WR = M.WR\n C = self._coeffs\n if format[0] == \"C\" or format[0] == \"c\":\n self._list_coefficients_by_components(fd, pos, neg, printimag, norm_neg, nmin, nmax, latex, nd, Lv, prime)\n else:\n self._list_coefficients_by_discriminant(fd, pos, neg, printimag, norm_neg, nmin, nmax, latex, nd, Lv, prime)", "def tolist (self) :\r\n if self.complex :\r\n result = []\r\n for x in xrange(0,len(self)) :\r\n result.append(self[x])\r\n return result\r\n else :\r\n return self.impl.tolist()", "def get(self, *args):\n return _libsbml.ListOfCompartments_get(self, *args)", "def coefficients(self):\n if self._coefficients is None:\n return np.hstack([c.coefficients for c in self._traces])\n return self._coefficients", "def _pco_list(self, hdr, name, pos):\n if name not in ('poly', 'champs', 'offset'):\n raise TypeError('Name must be one of \"poly\", \"champs\", or \"offset\".')\n\n hdr.seek(pos + 16)\n length = unpack(self._bo + 'i', hdr.read(4))[0]\n d = []\n for p in range(length):\n if name == 'poly':\n d.append(self._species(hdr))\n else:\n raise 
NotImplementedError(\n '{}List is non-null, don\\'t know how to read.'\n ''.format(name.capitalize()))\n hdr.seek(4, 1)\n return d", "def sublistsC (seq):\n if seq:\n sublists = [([seq[0]] + a, b) for a, b in sublistsC(seq[1:])]\n return sublists + [(b, a) for a, b in sublists]\n else:\n return [([], [])]", "def putclist(self,subj_,val_):\n num_ = None\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(val_)\n elif num_ != len(val_):\n raise IndexError(\"Inconsistent length of array val\")\n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if val_ is None:\n raise ValueError(\"Argument val cannot be None\")\n if val_ is None:\n raise ValueError(\"Argument val may not be None\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n res = __library__.MSK_XX_putclist(self.__nativep,num_,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getListOfConstraints(self, *args):\n return _libsbml.Model_getListOfConstraints(self, *args)", "def putclist(self,subj,val): # 3\n num_ = None\n if num_ is None:\n num_ = len(subj)\n elif num_ != len(subj):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(val)\n elif num_ != len(val):\n raise IndexError(\"Inconsistent length of array val\")\n if num_ is None: num_ = 0\n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n \n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n \n if val is None: raise TypeError(\"Invalid type for argument val\")\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n \n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n \n res = self.__obj.putclist(num_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_cids(self):\n return 
tuple(getattr(self, name + \"_cid\") for name in self.__argnames__)", "def __getitem__( self, l ) :\n\n return( self.coefficients[l] )", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def get_coeffs(self):\n\n return self._coeff_to_dict()", "def findCurvePoints(self, x, y, c):\n\t\tyCurve = []\n\t\tfor xi in x:\n\t\t\tyi = self.polynomialFunct(c, xi)\n\t\t\t\n\t\t\tyCurve.append( yi )\n\t\t\n\t\treturn np.asarray(yCurve)", "def getListOfCompartments(self):\n return self.model.getListOfCompartments()", "def _list_coefficients_by_components(self, fd=True, pos=True, neg=True, printimag=False, norm_neg=True, nmin=0, nmax=0, latex=False, nd=0, Lvals=False, prime=False):\n sig = 1\n if(self._space.WR.is_dual()):\n sig = -1\n maxi = max(self._coeffs.keys())\n w1 = len(str(maxi))\n w2 = max(list(map(len, str(self._space.WR.D()).split())))\n maxn = max(self._coeffs[list(self._coeffs.keys())[0]].keys())\n w3 = len(str(maxn)) + 1\n C = self._coeffs\n mp0 = mpmath.mpf(0)\n mpold = mpmath.mp.dps\n N = self._space.WR.N\n if(mpmath.mp.dps < self.maxdigs):\n mpmath.mp.dps = self.maxdigs\n if norm_neg:\n cnorm = 0\n tnorm = (0, 0)\n for j in range(1, 100):\n t = rn_from_D(self.space.WR, -j * sig)\n if(t is None):\n continue\n if(t[1] + self._space.WR.Qv[t[0]] >= 0):\n continue\n c1 = self.get_coefficient(t[0], t[1])\n if c1 is None:\n continue\n if abs(c1) > self._prec:\n cnorm = c1\n tnorm = t\n print(\"c1=c({0})={1}\".format(tnorm, cnorm))\n break\n for r in C.keys():\n for n in range(min(C[r].keys()), max(C[r].keys()) + 1):\n if nmin > 0 and abs(n) < nmin:\n continue\n if nmax > 0 and abs(n) > nmax:\n continue\n nn = n + self._space.WR.Qv[r]\n if not neg and nn < 0:\n continue\n if not pos and nn >= 0:\n continue\n D = self._space.D_from_rn((r, n))\n if(fd):\n if fd and not is_fundamental_discriminant(D) and D != 1:\n continue\n if prime and gcd(D, N) > 1:\n continue\n c = self.get_coefficient(r, n)\n cs = \"\"\n if c != 0 and c is not None:\n if(nn >= 0):\n ss = \"+\"\n if(nn < 0):\n ss = \"-\"\n if(norm_neg):\n # print \"r,n=\",r,n\n # print \"cnorm=\",cnorm\n # print \"tnorm=\",tnorm\n D = self._space.D_from_rn((r, n))\n if ((r, n) != tnorm) and cnorm != 0:\n c = c / cnorm * mpmath.sqrt(mpmath.mpf(abs(D)))\n if c.real() >= 0:\n cs = \" \"\n if not printimag:\n if nd > 0:\n cs = str(c.real()).strip()\n cs = sci_pretty_print(cs, nd, latex_pow=latex)\n else:\n cs = str(c.real())\n else:\n cs = cs + str(c)\n if Lvals and list(self._Lv.keys()).count(D) == 1:\n ls = \"\\t\" + str(self._Lv[D])\n else:\n if latex:\n ls = \"\\\\\\\\ \\n\"\n else:\n ls = \"\"\n if latex:\n D = self._space.WR.D()[r]\n if(is_int(D)):\n p = numerator(D)\n q = denominator(D)\n sr = \"\\\\frac{\" + str(p) + \"}{\" + str(q) + \"}\"\n else:\n sr = str(D)\n ss = \"\"\n print(\"$C{0}({1},{2}) $ & $ {3} $ {4}\".format(ss, sr.ljust(w1), str(n).ljust(w3), cs, ls))\n else:\n print(\"C^{0}[{1}][{2}] = {3}\".format(ss, str(r).ljust(w1), str(n).ljust(w3), cs + ls))\n mpmath.mp.dps = mpold", "def cinters_circle(self, c):\r\n if self.__segments == None:\r\n self.__load_segments()\r\n \r\n result = []\r\n for segment in self.__segments:\r\n points = c.inters_segment(segment)\r\n for p in points:\r\n result.append(p) \r\n \r\n return result" ]
[ "0.61023015", "0.60017306", "0.60017306", "0.6000582", "0.58433247", "0.581803", "0.581803", "0.57937485", "0.5704129", "0.55387217", "0.5518362", "0.5512744", "0.5497052", "0.54852647", "0.5466663", "0.54118663", "0.54016364", "0.537931", "0.5366499", "0.5345296", "0.5335419", "0.5313061", "0.5308713", "0.5303487", "0.5299834", "0.5295104", "0.5264533", "0.5242031", "0.5236824", "0.5223935" ]
0.70591795
0
Obtains a sequence of coefficients from the objective. getcslice(self,first_,last_,c_)
def getcslice(self,first_,last_,c_): _c_minlength = ((last_) - (first_)) if ((last_) - (first_)) > 0 and c_ is not None and len(c_) != ((last_) - (first_)): raise ValueError("Array argument c is not long enough: Is %d, expected %d" % (len(c_),((last_) - (first_)))) if isinstance(c_,numpy.ndarray) and not c_.flags.writeable: raise ValueError("Argument c must be writable") if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous: _c_copyarray = False _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif c_ is not None: _c_copyarray = True _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64)) _c_np_tmp[:] = c_ assert _c_np_tmp.flags.contiguous _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _c_copyarray = False _c_tmp = None res = __library__.MSK_XX_getcslice(self.__nativep,first_,last_,_c_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if _c_copyarray: c_[:] = _c_np_tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getcslice(self,first_,last_,c): # 3\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((last_) - (first_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.getcslice(first_,last_,c_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_c:\n c[:] = _tmparr_c", "def getxcslice(self,whichsol_,first_,last_,xc_):\n _xc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and xc_ is not None and len(xc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),((last_) - (first_))))\n if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable:\n raise ValueError(\"Argument xc must be writable\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n res = __library__.MSK_XX_getxcslice(self.__nativep,whichsol_,first_,last_,_xc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _xc_copyarray:\n xc_[:] = _xc_np_tmp", "def getxcslice(self,whichsol_,first_,last_,xc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xc has wrong length\")\n res = self.__obj.getxcslice(whichsol_,first_,last_,xc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_xc:\n xc[:] = _tmparr_xc", "def getskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getslcslice(self,whichsol_,first_,last_,slc_):\n _slc_minlength = ((last_) - (first_))\n if ((last_) - 
(first_)) > 0 and slc_ is not None and len(slc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),((last_) - (first_))))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n res = __library__.MSK_XX_getslcslice(self.__nativep,whichsol_,first_,last_,_slc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp", "def getskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getslcslice(self,whichsol_,first_,last_,slc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument slc has wrong length\")\n res = self.__obj.getslcslice(whichsol_,first_,last_,slc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_slc:\n slc[:] = _tmparr_slc", "def putxcslice(self,whichsol_,first_,last_,xc_):\n _xc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and xc_ is not None and len(xc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),((last_) - (first_))))\n if xc_ is None:\n raise ValueError(\"Argument xc cannot be None\")\n if xc_ is None:\n raise ValueError(\"Argument xc may not be None\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n 
_xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n res = __library__.MSK_XX_putxcslice(self.__nativep,whichsol_,first_,last_,_xc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putxcslice(self,whichsol_,first_,last_,xc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if xc is None: raise TypeError(\"Invalid type for argument xc\")\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n if xc_ is not None and len(xc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument xc has wrong length\")\n res = self.__obj.putxcslice(whichsol_,first_,last_,xc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsucslice(self,whichsol_,first_,last_,suc_):\n _suc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and suc_ is not None and len(suc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),((last_) - (first_))))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n res = __library__.MSK_XX_getsucslice(self.__nativep,whichsol_,first_,last_,_suc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp", "def putcslice(self,first_,last_,slice_):\n _slice_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and slice_ is not None and len(slice_) != ((last_) - (first_)):\n raise ValueError(\"Array argument slice is not long enough: Is %d, expected %d\" % (len(slice_),((last_) - (first_))))\n if slice_ is None:\n raise ValueError(\"Argument slice cannot be None\")\n if slice_ is None:\n raise ValueError(\"Argument slice may not be None\")\n if isinstance(slice_, numpy.ndarray) and slice_.dtype is numpy.dtype(numpy.float64) and slice_.flags.contiguous:\n _slice_copyarray = False\n _slice_tmp = ctypes.cast(slice_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slice_ is not None:\n _slice_copyarray = True\n _slice_np_tmp = numpy.zeros(len(slice_),numpy.dtype(numpy.float64))\n _slice_np_tmp[:] = slice_\n assert _slice_np_tmp.flags.contiguous\n _slice_tmp = ctypes.cast(_slice_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slice_copyarray = False\n _slice_tmp = None\n \n res = __library__.MSK_XX_putcslice(self.__nativep,first_,last_,_slice_tmp)\n if res != 0:\n _,msg = 
self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def coefficients(self) :\n raise NotImplementedError", "def __getitem__( self, l ) :\n\n return( self.coefficients[l] )", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def getsucslice(self,whichsol_,first_,last_,suc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument suc has wrong length\")\n res = self.__obj.getsucslice(whichsol_,first_,last_,suc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_suc:\n suc[:] = _tmparr_suc", "def putconboundsliceconst(self,first_,last_,bkc_,blc_,buc_):\n res = __library__.MSK_XX_putconboundsliceconst(self.__nativep,first_,last_,bkc_,blc_,buc_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def c(self) -> np.ndarray:\n return self._vector[10:12]", "def getsolutionslice(self,whichsol_,solitem_,first_,last_,values_):\n _values_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and values_ is not None and len(values_) != ((last_) - (first_)):\n raise ValueError(\"Array argument values is not long enough: Is %d, expected %d\" % (len(values_),((last_) - (first_))))\n if isinstance(values_,numpy.ndarray) and not values_.flags.writeable:\n raise ValueError(\"Argument values must be writable\")\n if isinstance(values_, numpy.ndarray) and values_.dtype is numpy.dtype(numpy.float64) and values_.flags.contiguous:\n _values_copyarray = False\n _values_tmp = ctypes.cast(values_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif values_ is not None:\n _values_copyarray = True\n _values_np_tmp = numpy.zeros(len(values_),numpy.dtype(numpy.float64))\n _values_np_tmp[:] = values_\n assert _values_np_tmp.flags.contiguous\n _values_tmp = ctypes.cast(_values_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _values_copyarray = False\n _values_tmp = None\n \n res = __library__.MSK_XX_getsolutionslice(self.__nativep,whichsol_,solitem_,first_,last_,_values_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _values_copyarray:\n values_[:] = _values_np_tmp", "def coefficients(self) :\n return self.__coefficients", "def __setslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___setslice__(self, *args)", "def coeff(self, pt):\n if pt < self.start or pt > self.stop:\n raise IndexError(\"Index %s out of bounds %s\" % (pt, self.interval))\n return self._eval_coeff(pt)", "def __getitem__(self, s) :\n try :\n return self.coefficients()[s]\n except KeyError :\n return self.parent().coefficient_domain().zero_element()", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def coefficients(self):\r\n return self.coef_['x']", "def __getslice__(self,i,j):\n return 
self.x[i:j]", "def __getitem__(self, c, *rest):\n if isinstance(c, Partition):\n assert len(rest) == 0\n else:\n if len(rest) > 0 or isinstance(c,(int,Integer)):\n c = self._kbounded_partitions.element_class(self._kbounded_partitions, [c]+list(rest))\n else:\n c = self._kbounded_partitions.element_class(self._kbounded_partitions, list(c))\n if len(c) != 0 and c[0] > self.k:\n raise ValueError(\"Partition is not %d-bounded\"%self.k)\n return self.monomial(c)", "def putconboundslice(self,first_,last_,bkc_,blc_,buc_):\n _bkc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bkc_ is not None and len(bkc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bkc is not long enough: Is %d, expected %d\" % (len(bkc_),((last_) - (first_))))\n if bkc_ is None:\n raise ValueError(\"Argument bkc cannot be None\")\n if bkc_ is None:\n raise ValueError(\"Argument bkc may not be None\")\n if bkc_ is not None:\n _bkc_tmp = (ctypes.c_int32 * len(bkc_))(*bkc_)\n else:\n _bkc_tmp = None\n _blc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and blc_ is not None and len(blc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument blc is not long enough: Is %d, expected %d\" % (len(blc_),((last_) - (first_))))\n if blc_ is None:\n raise ValueError(\"Argument blc cannot be None\")\n if blc_ is None:\n raise ValueError(\"Argument blc may not be None\")\n if isinstance(blc_, numpy.ndarray) and blc_.dtype is numpy.dtype(numpy.float64) and blc_.flags.contiguous:\n _blc_copyarray = False\n _blc_tmp = ctypes.cast(blc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blc_ is not None:\n _blc_copyarray = True\n _blc_np_tmp = numpy.zeros(len(blc_),numpy.dtype(numpy.float64))\n _blc_np_tmp[:] = blc_\n assert _blc_np_tmp.flags.contiguous\n _blc_tmp = ctypes.cast(_blc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blc_copyarray = False\n _blc_tmp = None\n \n _buc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and buc_ is not None and len(buc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument buc is not long enough: Is %d, expected %d\" % (len(buc_),((last_) - (first_))))\n if buc_ is None:\n raise ValueError(\"Argument buc cannot be None\")\n if buc_ is None:\n raise ValueError(\"Argument buc may not be None\")\n if isinstance(buc_, numpy.ndarray) and buc_.dtype is numpy.dtype(numpy.float64) and buc_.flags.contiguous:\n _buc_copyarray = False\n _buc_tmp = ctypes.cast(buc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif buc_ is not None:\n _buc_copyarray = True\n _buc_np_tmp = numpy.zeros(len(buc_),numpy.dtype(numpy.float64))\n _buc_np_tmp[:] = buc_\n assert _buc_np_tmp.flags.contiguous\n _buc_tmp = ctypes.cast(_buc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _buc_copyarray = False\n _buc_tmp = None\n \n res = __library__.MSK_XX_putconboundslice(self.__nativep,first_,last_,_bkc_tmp,_blc_tmp,_buc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putslcslice(self,whichsol_,first_,last_,slc_):\n _slc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and slc_ is not None and len(slc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),((last_) - (first_))))\n if slc_ is None:\n raise ValueError(\"Argument slc cannot be None\")\n if slc_ is None:\n raise ValueError(\"Argument slc may not be None\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is 
numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n res = __library__.MSK_XX_putslcslice(self.__nativep,whichsol_,first_,last_,_slc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_putskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putcslice(self,first_,last_,slice): # 3\n if slice is None: raise TypeError(\"Invalid type for argument slice\")\n if slice is None:\n slice_ = None\n else:\n try:\n slice_ = memoryview(slice)\n except TypeError:\n try:\n _tmparr_slice = array.array(\"d\",slice)\n except TypeError:\n raise TypeError(\"Argument slice has wrong type\")\n else:\n slice_ = memoryview(_tmparr_slice)\n \n else:\n if slice_.format != \"d\":\n slice_ = memoryview(array.array(\"d\",slice))\n \n if slice_ is not None and len(slice_) != ((last_) - (first_)):\n raise ValueError(\"Array argument slice has wrong length\")\n res = self.__obj.putcslice(first_,last_,slice_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)" ]
[ "0.74884826", "0.6898075", "0.6749403", "0.6479534", "0.6430882", "0.62302166", "0.6188517", "0.59158885", "0.5810388", "0.5780244", "0.56624705", "0.56085896", "0.5574313", "0.5566425", "0.55087215", "0.5465687", "0.5429033", "0.5421072", "0.5413725", "0.53567517", "0.5345228", "0.5335757", "0.5319425", "0.5318317", "0.53081983", "0.5288195", "0.5286638", "0.5285024", "0.528301", "0.5271648" ]
0.7475892
1
Obtains a double information item. getdouinf(self,whichdinf_)
def getdouinf(self,whichdinf_): dvalue_ = ctypes.c_double() res = __library__.MSK_XX_getdouinf(self.__nativep,whichdinf_,ctypes.byref(dvalue_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) dvalue_ = dvalue_.value _dvalue_return_value = dvalue_ return (_dvalue_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdouinf(self,whichdinf_): # 3\n if not isinstance(whichdinf_,dinfitem): raise TypeError(\"Argument whichdinf has wrong type\")\n res,resargs = self.__obj.getdouinf(whichdinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dvalue_return_value = resargs\n return _dvalue_return_value", "def getintinf(self,whichiinf_): # 3\n if not isinstance(whichiinf_,iinfitem): raise TypeError(\"Argument whichiinf has wrong type\")\n res,resargs = self.__obj.getintinf(whichiinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def getDip(self):\n return self._dip", "def getD(self):\r\n return self.D", "def getDi(self, n=None, label=None):\n return self.di[label][n]", "def get_details(disease):\n\treturn d_desc_map[disease]", "def XPLMGetDatad_f(inRefcon):\n pass", "def idd_info(self):\n if self._idd_info is None:\n bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(\n self.idfname, self.iddname, self, commdct=None, block=None\n )\n self._block = block\n self._idd_info = commdct\n self._idd_index = idd_index\n self._idfobjects = bunchdt\n self._model = data\n self._idd_version = versiontuple\n return self._idd_info", "def get_wotd():\n\treturn wotd", "def GetDatum(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_GetDatum(self, *args)", "def __getitem__(self, item):\n return self.hdus[item]", "def getintinf(self,whichiinf_):\n ivalue_ = ctypes.c_int32()\n res = __library__.MSK_XX_getintinf(self.__nativep,whichiinf_,ctypes.byref(ivalue_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n ivalue_ = ivalue_.value\n _ivalue_return_value = ivalue_\n return (_ivalue_return_value)", "def driehoekig(self):\n return self._driehoekig.get_waarde()", "def getDouble(self, int: int, int2: int) -> float:\n ...", "def FindDatum(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_FindDatum(self, *args)", "def getiddname(self):\n return self.iddname", "def x(self):\r\n return self.unif[0]", "def getlintinf(self,whichliinf_): # 3\n if not isinstance(whichliinf_,liinfitem): raise TypeError(\"Argument whichliinf has wrong type\")\n res,resargs = self.__obj.getlintinf(whichliinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def get_dcmgnd(self):\n return self.dcmgnd", "def doi(self):\n return LiteratureReader(self.record).doi", "def _get(weather_data, item):\r\n return weather_data.get(item, \"\")", "def dn(self):\n return self.dndlogdp.mul(self.dlogdp)", "def getNeighbor(self, neighborID):", "def getDensityEstimate(self):\n return self.density", "def getFood(self):\n return self.data.food", "def den(self):\n return self.den", "def info(self):\n return self.nfo", "def hdu_info(self):\n return self._hdusinfo", "def get_dih(year):\r\n return common.get_dict(get_dih_filename(year), 'DaysInHospital', int)", "def getValue(self):\n return DPxGetDinValue()" ]
[ "0.8139567", "0.61747223", "0.57404494", "0.56027913", "0.54675555", "0.5288993", "0.5271653", "0.52566004", "0.5161955", "0.5105282", "0.50878364", "0.50546545", "0.5054538", "0.5047877", "0.5047438", "0.5029467", "0.50197035", "0.5018882", "0.5017841", "0.49776104", "0.49767607", "0.49569342", "0.4952468", "0.49438515", "0.49278337", "0.4920823", "0.49157676", "0.49137434", "0.49049678", "0.4890677" ]
0.7271567
1
Obtains a double parameter. getdouparam(self,param_)
def getdouparam(self,param_): parvalue_ = ctypes.c_double() res = __library__.MSK_XX_getdouparam(self.__nativep,param_,ctypes.byref(parvalue_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) parvalue_ = parvalue_.value _parvalue_return_value = parvalue_ return (_parvalue_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdouparam(self,param_): # 3\n if not isinstance(param_,dparam): raise TypeError(\"Argument param has wrong type\")\n res,resargs = self.__obj.getdouparam(param_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _parvalue_return_value = resargs\n return _parvalue_return_value", "def getParam(self,param):\n if param in self.params.keys():\n return self.params[param]\n else:\n return None", "def get_param(self, param):\n return self.params.get(param, None)", "def param(self, *args, **kwargs):\n return self.options.param(*args,**kwargs)", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def get_param(self, parameter):\n if self.c.get(1).get('data'):\n return self.c.get(1).data.get(parameter, None)\n return None", "def param(self):\n return self._param", "def get_param_values(self,obs_name,param):\n\n return self.datasets[obs_name][param]", "def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError", "def _param(self) ->nn.Parameter:\n return next(self.parameters())", "def get_parameter(self, param):\n try:\n result = self._data[\"queryResult\"][\"parameters\"][param]\n except KeyError:\n result = None\n\n return result", "def getDouble(self, int: int, int2: int) -> float:\n ...", "def getParameter(self, name):", "def point_to_param(self, pt):\n r = self.p2 - self.p1\n return (pt - self.p1).dot(r) / r.square()", "def get(self, param, phase=\"last\", name=\"Main\"):\n df = self.summary(name=name)\n if param not in df.columns:\n raise KeyError(f\"@param must be in {', '.join(df.columns)}.\")\n if phase == \"last\":\n phase = df.index[-1]\n return df.loc[phase, param]", "def gui_get_param(self,param_name):\n return self._tkvars[param_name].get()", "def param_to_point(self, param):\n return self.p1 + param * (self.p2 - self.p1)", "def get_parameter(cur, par):\n cur.execute(\"SELECT value FROM parameters WHERE par='%s';\" % par)\n return cur.fetchone()[0]", "def get_param(self):\n\t\treturn handle_to_object(call_sdk_function('PrlResult_GetParam', self.handle))", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def d_Drepp_d_par(self,par):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n # first term\n term1 = -cosE*self.prtl_der('alpha',par)\n # second term\n term2 = (self.alpha()*sinE - \\\n (self.beta()+self.GAMMA)*cosE)*self.prtl_der('E',par)\n # Third term\n term3 = -sinE*(self.prtl_der('beta',par)+self.prtl_der('GAMMA',par))\n\n return term1+term2+term3", "def get_param_duration(param):\n\n # dummy value\n value = rospy.Duration(1)\n\n try:\n # only a default value in case the param gets fuzzed.\n value = rospy.Duration(get_param_num(param))\n except ValueError:\n err_msg = (\n \"Param %s has the invalid value '%s'.\"\n % (param, rospy.get_param(param)))\n rospy.logerr(err_msg)\n rospy.signal_shutdown(err_msg)\n value = rospy.Duration(1)\n return value", "def d_alpha_d_par(self,par):\n\n if par not in self.binary_params:\n errorMesg = par + \"is not in binary parameter list.\"\n raise ValueError(errorMesg)\n\n if par in ['A1','A1DOT']:\n dername = 'd_alpha_d_'+par\n return getattr(self,dername)()\n\n else:\n dername = 'd_omega_d_'+par # For parameters only in Ae\n if hasattr(self,dername):\n cosOmg=np.cos(self.omega())\n return self.a1()/c.c*cosOmg*getattr(self,dername)()\n else:\n return np.longdouble(np.zeros(len(self.tt0)))", "def getParameter(self, *args):\n return 
_libsbml.Model_getParameter(self, *args)", "def getParameter(self, *args):\n return _libsbml.KineticLaw_getParameter(self, *args)", "def get_value(self, param, freq):\n p = self.get_parameter(param)\n value = p.get_value(freq)\n return value", "def Getdxdparam(Mda,Mdb,Xa):\n\n Xb = Xa.copy()\n #Xb[iulag] = Xa[iulag] + (1-Xa[iq]*Xa[iM])*(Mdb.ubar-Mda.ubar)\n Xb[Mdb.nX:Mdb.nXY] = Mdb.F(Xb[Mdb.interpstates])\n Xb[Mdb.nXY:] = Mdb.Static(Xb)\n\n if CLArgs.param == \"b\":\n D = Mdb.b() - Mda.b()\n else:\n D = Mdb.tau - Mda.tau\n\n return (Xb[iM] - Xa[iM])/D", "def patience_param(x):\n # -- TODO: make this do something!\n return x", "def from_param(self):\n return self._dither", "def get_param(param):\n try:\n return get_param_server().getParam(param)\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")" ]
[ "0.7216291", "0.6473787", "0.63799834", "0.6056583", "0.6039823", "0.60313606", "0.5998796", "0.59530497", "0.5941797", "0.5872715", "0.5843549", "0.5813088", "0.5800583", "0.576759", "0.57210326", "0.56473833", "0.5598247", "0.5590956", "0.55658185", "0.5548109", "0.5548043", "0.5540383", "0.55133027", "0.5474782", "0.54703546", "0.54520005", "0.5437544", "0.54323894", "0.5427863", "0.54197294" ]
0.7105613
1
Computes the dual objective value associated with the solution. getdualobj(self,whichsol_)
def getdualobj(self,whichsol_): dualobj_ = ctypes.c_double() res = __library__.MSK_XX_getdualobj(self.__nativep,whichsol_,ctypes.byref(dualobj_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) dualobj_ = dualobj_.value _dualobj_return_value = dualobj_ return (_dualobj_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdualobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getdualobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dualobj_return_value = resargs\n return _dualobj_return_value", "def getprimalobj(self,whichsol_):\n primalobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getprimalobj(self.__nativep,whichsol_,ctypes.byref(primalobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n primalobj_ = primalobj_.value\n _primalobj_return_value = primalobj_\n return (_primalobj_return_value)", "def getprimalobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprimalobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _primalobj_return_value = resargs\n return _primalobj_return_value", "def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = __library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n _dviolcone_return_value = dviolcone_\n return (_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def objective_val(self):\n return self.m.ObjVal", "def get_sol(self):", "def getdualsolutionnorms(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getdualsolutionnorms(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n 
_nrmy_return_value,_nrmslc_return_value,_nrmsuc_return_value,_nrmslx_return_value,_nrmsux_return_value,_nrmsnx_return_value,_nrmbars_return_value = resargs\n return _nrmy_return_value,_nrmslc_return_value,_nrmsuc_return_value,_nrmslx_return_value,_nrmsux_return_value,_nrmsnx_return_value,_nrmbars_return_value", "def getdualsolutionnorms(self,whichsol_):\n nrmy_ = ctypes.c_double()\n nrmslc_ = ctypes.c_double()\n nrmsuc_ = ctypes.c_double()\n nrmslx_ = ctypes.c_double()\n nrmsux_ = ctypes.c_double()\n nrmsnx_ = ctypes.c_double()\n nrmbars_ = ctypes.c_double()\n res = __library__.MSK_XX_getdualsolutionnorms(self.__nativep,whichsol_,ctypes.byref(nrmy_),ctypes.byref(nrmslc_),ctypes.byref(nrmsuc_),ctypes.byref(nrmslx_),ctypes.byref(nrmsux_),ctypes.byref(nrmsnx_),ctypes.byref(nrmbars_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nrmy_ = nrmy_.value\n _nrmy_return_value = nrmy_\n nrmslc_ = nrmslc_.value\n _nrmslc_return_value = nrmslc_\n nrmsuc_ = nrmsuc_.value\n _nrmsuc_return_value = nrmsuc_\n nrmslx_ = nrmslx_.value\n _nrmslx_return_value = nrmslx_\n nrmsux_ = nrmsux_.value\n _nrmsux_return_value = nrmsux_\n nrmsnx_ = nrmsnx_.value\n _nrmsnx_return_value = nrmsnx_\n nrmbars_ = nrmbars_.value\n _nrmbars_return_value = nrmbars_\n return (_nrmy_return_value,_nrmslc_return_value,_nrmsuc_return_value,_nrmslx_return_value,_nrmsux_return_value,_nrmsnx_return_value,_nrmbars_return_value)", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def objective(self) -> Optional[Union[int, float]]:\n if self.solution is not None:\n if isinstance(self.solution, list):\n return getattr(self.solution[-1], \"objective\", None)\n else:\n return getattr(self.solution, \"objective\", None)\n else:\n return None", "def objective(self):\n return self._objective", "def _get_rhs(self, thermo, chem_pot_array, beta):\n num_singlets = len(self._ground_states) - 1\n matrix = np.zeros((num_singlets, num_singlets))\n energy_vector = np.zeros(num_singlets)\n for i in range(num_singlets):\n for j in range(num_singlets):\n ref_singlet = thermo[0][get_singlet_name(\n self._singlet_names[j])]\n singlet = thermo[i +\n 1][get_singlet_name(self._singlet_names[j])]\n matrix[i, j] = ref_singlet - singlet\n ref_energy = thermo[0][\"energy\"] / \\\n len(self._ground_states[0][\"atoms\"])\n energy = thermo[i + 1][\"energy\"] / \\\n len(self._ground_states[i + 1][\"atoms\"])\n energy_vector[i] = ref_energy - energy\n inv_matrix = np.linalg.inv(matrix)\n rhs = inv_matrix.dot(energy_vector) / beta - chem_pot_array / beta\n return rhs", "def get_solution(self):\r\n return self.solution", "def get_objective(self, X_v, U_v, X_last_p, U_last_p):\n objective = None\n return objective", "def getsolsta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): 
raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolsta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = resargs\n _solsta_return_value = solsta(_solsta_return_value)\n return _solsta_return_value", "def dual(self):\n return dual_array(self)", "def recover_dual_variables(task, sol, inverse_data):\n dual_vars = dict()\n\n # Dual variables for the inequality constraints\n suc_len = sum(ell for _, ell in inverse_data['suc_slacks'])\n if suc_len > 0:\n suc = [0.] * suc_len\n task.getsucslice(sol, 0, suc_len, suc)\n dual_vars.update(MOSEK._parse_dual_var_block(suc, inverse_data['suc_slacks']))\n\n # Dual variables for the original equality constraints\n y_len = sum(ell for _, ell in inverse_data['y_slacks'])\n if y_len > 0:\n y = [0.] * y_len\n task.getyslice(sol, suc_len, suc_len + y_len, y)\n y = [-val for val in y]\n dual_vars.update(MOSEK._parse_dual_var_block(y, inverse_data['y_slacks']))\n\n # Dual variables for SOC and EXP constraints\n snx_len = sum(ell for _, ell in inverse_data['snx_slacks'])\n if snx_len > 0:\n snx = np.zeros(snx_len)\n task.getsnxslice(sol, inverse_data['n0'], inverse_data['n0'] + snx_len, snx)\n dual_vars.update(MOSEK._parse_dual_var_block(snx, inverse_data['snx_slacks']))\n\n # Dual variables for PSD constraints\n for j, (id, dim) in enumerate(inverse_data['psd_dims']):\n sj = [0.] * (dim * (dim + 1) // 2)\n task.getbarsj(sol, j, sj)\n dual_vars[id] = vectorized_lower_tri_to_mat(sj, dim)\n\n # Now that all dual variables have been recovered, find those corresponding\n # to the exponential cone, and permute the entries to reflect the CVXPY\n # standard for the exponential cone.\n for con in inverse_data['constraints']:\n if isinstance(con, ExpCone):\n cid = con.id\n perm = expcone_permutor(con.num_cones(), MOSEK.EXP_CONE_ORDER)\n dual_vars[cid] = dual_vars[cid][perm]\n return dual_vars", "def dual_objective_expression_rule(_m):\r\n\r\n # Build limits\r\n t_1 = sum(- (m.mu_2[z, y] * m.SOLAR_BUILD_LIMITS[z]) - (m.mu_3[z, y] * m.WIND_BUILD_LIMITS[z]) - (\r\n m.mu_4[z, y] * m.STORAGE_BUILD_LIMITS[z]) for z in m.Z for y in m.Y)\r\n\r\n # Min power output\r\n t_2 = sum(\r\n m.sigma_1[g, y, s, t] * m.P_MIN[g] for g in m.G.difference(m.G_STORAGE) for y in m.Y for s in m.S for t\r\n in m.T)\r\n\r\n # Max power - existing generators\r\n t_3 = sum(\r\n - m.sigma_2[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_THERM for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Max power - existing wind\r\n t_4 = sum(\r\n - m.sigma_4[g, y, s, t] * m.Q_W[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_WIND for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing solar\r\n t_5 = sum(\r\n - m.sigma_6[g, y, s, t] * m.Q_S[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_SOLAR for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing hydro\r\n t_6 = sum(\r\n - m.sigma_8[g, y, s, t] * m.P_H[g, y, s, t] * (1 - m.F[g, y]) for g in m.G_E_HYDRO for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max charging power - existing storage\r\n t_7 = sum(\r\n - m.sigma_11[g, y, s, t] * m.P_IN_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max discharging power - existing storage\r\n t_8 = sum(\r\n - m.sigma_13[g, y, s, t] * m.P_OUT_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max energy - existing storage units\r\n t_9 = 
sum(\r\n - m.sigma_16[g, y, s, t] * m.Q_MAX[g] for g in m.G_E_STORAGE for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Min energy - interval end\r\n t_10 = sum(m.sigma_18[g, y, s] * m.Q_END_MIN[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Max energy - interval end\r\n t_11 = sum(- m.sigma_19[g, y, s] * m.Q_END_MAX[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Ramp-up constraint - generators\r\n t_12 = sum(\r\n - m.sigma_20[g, y, s, t] * m.RR_UP[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-up constraint - initial power output - generators\r\n t_13 = sum(\r\n - m.sigma_20[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y\r\n for s in m.S)\r\n\r\n # Ramp-down constraint - generators\r\n t_18 = sum(\r\n - m.sigma_23[g, y, s, t] * m.RR_DOWN[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-down constraint - initial power output - generators\r\n t_19 = sum(\r\n m.sigma_23[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for\r\n s in m.S)\r\n\r\n # Min powerflow\r\n t_24 = sum(m.sigma_27[l, y, s, t] * m.POWERFLOW_MIN[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Max powerflow\r\n t_25 = sum(\r\n - m.sigma_28[l, y, s, t] * m.POWERFLOW_MAX[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Demand\r\n t_26 = sum(m.lamb[z, y, s, t] * m.DEMAND[z, y, s, t] for z in m.Z for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Initial storage unit energy\r\n t_27 = sum(m.zeta_1[g, y, s, m.T.first()] * m.Q0[g, y, s] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n return (t_1 + t_2 + t_3 + t_4 + t_5 + t_6 + t_7 + t_8 + t_9 + t_10 + t_11 + t_12 + t_13 + t_18 + t_19 + t_24\r\n + t_25 + t_26 + t_27)", "def define_objective(m):\r\n\r\n # Dual objective function\r\n m.OBJECTIVE = Objective(expr=m.DUAL_OBJECTIVE_EXPRESSION, sense=maximize)\r\n\r\n return m", "def _objfunc(self, dv_dict):\n\n fail = 0\n metadata = self.metadata\n system = self.root\n\n try:\n for name in self.indep_list:\n self.set_desvar(name, dv_dict[name])\n\n # Execute the model\n #print(\"Setting DV\")\n #print(dv_dict)\n\n self.iter_count += 1\n update_local_meta(metadata, (self.iter_count,))\n\n try:\n with self.root._dircontext:\n system.solve_nonlinear(metadata=metadata)\n\n # Let the optimizer try to handle the error\n except AnalysisError:\n fail = 1\n\n func_dict = self.get_objectives() # this returns a new OrderedDict\n func_dict.update(self.get_constraints())\n\n # Record after getting obj and constraint to assure they have\n # been gathered in MPI.\n self.recorders.record_iteration(system, metadata)\n\n # Get the double-sided constraint evaluations\n #for key, con in iteritems(self.get_2sided_constraints()):\n # func_dict[name] = np.array(con.evaluate(self.parent))\n\n except Exception as msg:\n tb = traceback.format_exc()\n\n # Exceptions seem to be swallowed by the C code, so this\n # should give the user more info than the dreaded \"segfault\"\n print(\"Exception: %s\" % str(msg))\n print(70*\"=\",tb,70*\"=\")\n fail = 1\n func_dict = {}\n\n #print(\"Functions calculated\")\n #print(func_dict)\n return func_dict, fail", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def objective(self):\n pass", "def getdviolvar(self,whichsol_,sub,viol): # 3\n if not isinstance(whichsol_,soltype): raise 
TypeError(\"Argument whichsol has wrong type\")\n num_ = None\n if num_ is None:\n num_ = len(sub)\n elif num_ != len(sub):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None: num_ = 0\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n \n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n \n if viol is None: raise TypeError(\"Invalid type for argument viol\")\n _copyback_viol = False\n if viol is None:\n viol_ = None\n else:\n try:\n viol_ = memoryview(viol)\n except TypeError:\n try:\n _tmparr_viol = array.array(\"d\",viol)\n except TypeError:\n raise TypeError(\"Argument viol has wrong type\")\n else:\n viol_ = memoryview(_tmparr_viol)\n _copyback_viol = True\n else:\n if viol_.format != \"d\":\n viol_ = memoryview(array.array(\"d\",viol))\n _copyback_viol = True\n if viol_ is not None and len(viol_) != (num_):\n raise ValueError(\"Array argument viol has wrong length\")\n res = self.__obj.getdviolvar(whichsol_,num_,sub_,viol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_viol:\n viol[:] = _tmparr_viol", "def get_objective(self, sampler=None):\n def objective(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy(circuit, sampler)\n\n def obj_expect(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy_sparse(circuit)\n\n if sampler is not None:\n return objective\n if self.sparse is None:\n self.make_sparse()\n return obj_expect", "def return_solver(self):\r\n sol = ht3_solver(self.mesh)\r\n sol.max_T = self.max_T\r\n sol.d_T = self.d_T\r\n sol.saved_data = self.saved_data\r\n sol.node_map = self.node_map\r\n return sol", "def getprosta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprosta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = resargs\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value", "def get_dual_val(self, var_name, pos):\n val = self.get_other_value(self.dual_var, var_name, pos)\n if not self.pdv_to_csv: # if not saved to csv file\n return val\n else: # otherwise, we should get the file path and read from the file to array or mat\n f_path = os.path.join(self.root_dir, 'dual_vars', var_name, str(val) + '.csv')\n df = pd.read_csv(f_path, header = None) # first read csv file into a pandas data frame and then transform\n return np.asmatrix(df.values)", "def update_current_sol_and_cost(self,sol=None):\n\n # Update current sol if argument given\n if sol is not None:\n self.current_sol = sol\n \n # Update residual and cost\n try:\n self.residual = self.sketch_reweighted - self.sketch_of_solution(self.current_sol)\n self.current_sol_cost = np.linalg.norm(self.residual)\n except AttributeError: # We are here if self.current_sol does not exist yet\n self.current_sol, self.residual = None, self.sketch_reweighted\n self.current_sol_cost = np.inf", "def getObjective(self, *args):\n return _libsbml.FbcModelPlugin_getObjective(self, *args)" ]
[ "0.8319954", "0.6920154", "0.6874271", "0.63215345", "0.6047877", "0.5961174", "0.59548587", "0.59537184", "0.58911043", "0.5846982", "0.5843001", "0.5692419", "0.5645508", "0.5627053", "0.5615924", "0.5534404", "0.5509056", "0.5460137", "0.5445977", "0.5432232", "0.5387754", "0.5381813", "0.53387713", "0.5336079", "0.53168064", "0.5313718", "0.5278235", "0.52592915", "0.52289516", "0.5227192" ]
0.79599565
1
Obtains an integer information item. getintinf(self,whichiinf_)
def getintinf(self,whichiinf_): ivalue_ = ctypes.c_int32() res = __library__.MSK_XX_getintinf(self.__nativep,whichiinf_,ctypes.byref(ivalue_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) ivalue_ = ivalue_.value _ivalue_return_value = ivalue_ return (_ivalue_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getintinf(self,whichiinf_): # 3\n if not isinstance(whichiinf_,iinfitem): raise TypeError(\"Argument whichiinf has wrong type\")\n res,resargs = self.__obj.getintinf(whichiinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def getlintinf(self,whichliinf_): # 3\n if not isinstance(whichliinf_,liinfitem): raise TypeError(\"Argument whichliinf has wrong type\")\n res,resargs = self.__obj.getlintinf(whichliinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def getlintinf(self,whichliinf_):\n ivalue_ = ctypes.c_int64()\n res = __library__.MSK_XX_getlintinf(self.__nativep,whichliinf_,ctypes.byref(ivalue_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n ivalue_ = ivalue_.value\n _ivalue_return_value = ivalue_\n return (_ivalue_return_value)", "def getdouinf(self,whichdinf_): # 3\n if not isinstance(whichdinf_,dinfitem): raise TypeError(\"Argument whichdinf has wrong type\")\n res,resargs = self.__obj.getdouinf(whichdinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dvalue_return_value = resargs\n return _dvalue_return_value", "def getInteger(self):", "def getInteger(self):", "def getInteger(self):\n pass", "def getI(self):\n\n return self.i", "def ins_ii(self):\n return self._ins_ii", "def ipi(self):\n return self._ipi", "def XPLMGetDatai_f(inRefcon):\n return int", "def get_int2(self):\n pass", "def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)", "def _getIntFeature(self):\n\n # create args\n valueToGet = c_int64()\n\n errorCode = VimbaDLL.featureIntGet(self._handle,\n self._name,\n byref(valueToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return valueToGet.value", "def getdouinf(self,whichdinf_):\n dvalue_ = ctypes.c_double()\n res = __library__.MSK_XX_getdouinf(self.__nativep,whichdinf_,ctypes.byref(dvalue_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dvalue_ = dvalue_.value\n _dvalue_return_value = dvalue_\n return (_dvalue_return_value)", "def _get_iops(self, report):\n match = re.search(\"iops\\=(\\d+)\", report)\n if match:\n return int(match.group(1))", "def getSubintinfo(self,value):\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n return None", "def i(self):\n return self._i", "def getIR2() -> int:\n pass", "def get_inttemp(self):\n return self.read_register(4104, 1, 3)", "def XPLMGetDatai(inDataRef):\n return int", "def getInteger(self):\n assert self._is_int is True\n return self._value", "def ied_num(self) -> str:\n return pulumi.get(self, \"ied_num\")", "def getIR1() -> int:\n pass", "def instrID(self):\n return self.query('*IDN?')", "def getInteger(self):\n return self.value if self.isInteger() else None", "def getInt(self):\n try:\n data, n = self._buf.pop(_I_LEN)\n \n if n != _I_LEN:\n raise SerializationError('There is not enough data left.')\n \n return _I_STRUCT.unpack(data)[0]\n except StructError as e:\n raise SerializationError('Data is not a valid integer: '\n '{0}'.format(e))", "def __int__(self):\n return self.get_raw_int()", "def get_ibat(self):\n return self.read_register(4097, 1, 3)", "def getint(self, option):\n return getint(self.name, option)" ]
[ "0.8475795", "0.7179274", "0.71271324", "0.6888658", "0.61792326", "0.61792326", "0.61284137", "0.6106969", "0.6090202", "0.60640436", "0.5953424", "0.5949886", "0.5806069", "0.5791027", "0.57118326", "0.57102764", "0.5709367", "0.5658817", "0.56567997", "0.5631905", "0.5562323", "0.55616623", "0.55563307", "0.55512255", "0.555092", "0.551458", "0.5497488", "0.5486335", "0.5473889", "0.5445329" ]
0.80981195
1
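The record above documents `Task.getintinf` from the MOSEK Optimizer API for Python, which reads a single integer information item from the task. A minimal usage sketch, assuming the official `mosek` package (version 9 or later, where `Env` and `Task` act as context managers) and a valid license; the information item chosen here is only an illustration:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        # Problem data would normally be loaded here; an empty task still optimizes trivially.
        task.optimize()
        # Number of interior-point iterations performed during the last optimization.
        iters = task.getintinf(mosek.iinfitem.intpnt_iter)
        print("interior-point iterations:", iters)
```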
Obtains a long integer information item. getlintinf(self,whichliinf_)
def getlintinf(self,whichliinf_): ivalue_ = ctypes.c_int64() res = __library__.MSK_XX_getlintinf(self.__nativep,whichliinf_,ctypes.byref(ivalue_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) ivalue_ = ivalue_.value _ivalue_return_value = ivalue_ return (_ivalue_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getlintinf(self,whichliinf_): # 3\n if not isinstance(whichliinf_,liinfitem): raise TypeError(\"Argument whichliinf has wrong type\")\n res,resargs = self.__obj.getlintinf(whichliinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def getintinf(self,whichiinf_): # 3\n if not isinstance(whichiinf_,iinfitem): raise TypeError(\"Argument whichiinf has wrong type\")\n res,resargs = self.__obj.getintinf(whichiinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ivalue_return_value = resargs\n return _ivalue_return_value", "def getintinf(self,whichiinf_):\n ivalue_ = ctypes.c_int32()\n res = __library__.MSK_XX_getintinf(self.__nativep,whichiinf_,ctypes.byref(ivalue_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n ivalue_ = ivalue_.value\n _ivalue_return_value = ivalue_\n return (_ivalue_return_value)", "def getLong(self, int: int, int2: int) -> int:\n ...", "def getdouinf(self,whichdinf_): # 3\n if not isinstance(whichdinf_,dinfitem): raise TypeError(\"Argument whichdinf has wrong type\")\n res,resargs = self.__obj.getdouinf(whichdinf_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dvalue_return_value = resargs\n return _dvalue_return_value", "def read_long(self):\n return self._packers[\"l\"].unpack(self.read(4))[0]", "def getLong(self, name: unicode) -> long:\n ...", "def getLong(t, swipl):\n i = c_long()\n if swipl.PL_get_long(t, byref(i)):\n return i.value\n else:\n raise InvalidTypeError(\"long\")", "def getLongRow(self, int: int) -> typing.List[int]:\n ...", "def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit", "def __long__(self):\n if len(self) == 8:\n return struct_Q.unpack(self)[0]\n else:\n raise ValueError(\"Unable to cast field to int: length must be 8 bytes, field length is %d\" % len(self))", "def lun(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"lun\")", "def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...", "def serialize_long(self, obj):\n return self.serialize_int(obj)", "def we(self):\n return self.long", "def get_int2(self):\n pass", "def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit", "def get_list_index(self):\r\n s = self.query('LIST:IND?')\r\n return int(s)", "def get_list_index(self):\r\n s = self.query('LIST:IND?')\r\n return int(s)", "def getInteger(self):", "def getInteger(self):", "def read_long_long(data):\n s_type = \"=%s\" % get_type(\"long_long\")\n return struct.unpack(s_type, data.read(8))[0]", "def get_Lo(self):\n return self.Lo", "def get_Lo(self):\n return self.Lo", "def getlong(self, option, default = None, section = None):\n return long(self.get(option, default, section))", "def read_long(self):\n a, b, c, d = self.read_list(4)\n return a << 24 | b << 16 | c << 8 | d", "def 
getInteger(self):\n pass", "def _get_iops(self, report):\n match = re.search(\"iops\\=(\\d+)\", report)\n if match:\n return int(match.group(1))" ]
[ "0.80946934", "0.72867656", "0.6978907", "0.6415118", "0.5947596", "0.58325267", "0.5797108", "0.5752881", "0.5589606", "0.53243405", "0.53243405", "0.530509", "0.52829987", "0.5265193", "0.52526945", "0.52087086", "0.5194227", "0.5194157", "0.5146076", "0.512527", "0.512527", "0.51208526", "0.51208526", "0.51144636", "0.51099765", "0.51099765", "0.5086055", "0.50773627", "0.50750315", "0.50549746" ]
0.8262633
0
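`Task.getlintinf`, documented by the record above, follows the same pattern as `getintinf` but returns 64-bit information items. A hedged sketch; the specific `liinfitem` member used below is an assumption chosen for illustration only:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        task.optimize()
        # Assumed item name: number of nonzeros in the interior-point factorization.
        nnz = task.getlintinf(mosek.liinfitem.intpnt_factor_num_nz)
        print("factor nonzeros:", nnz)
```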
Obtains an integer parameter. getintparam(self,param_)
def getintparam(self,param_): parvalue_ = ctypes.c_int32() res = __library__.MSK_XX_getintparam(self.__nativep,param_,ctypes.byref(parvalue_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) parvalue_ = parvalue_.value _parvalue_return_value = parvalue_ return (_parvalue_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getintparam(self,param_): # 3\n if not isinstance(param_,iparam): raise TypeError(\"Argument param has wrong type\")\n res,resargs = self.__obj.getintparam(param_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _parvalue_return_value = resargs\n return _parvalue_return_value", "def _get_int_param(request, param):\n try:\n int_param = utils.validate_integer(request.GET[param], param,\n min_value=0)\n except exception.InvalidInput as e:\n raise webob.exc.HTTPBadRequest(explanation=e.format_message())\n return int_param", "def getIntParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n try: return int(value)\n except: return default", "def getintparam(name, default=None, stash=None, params=None):\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return int(v[0])\n return default", "def getIntParam(self, params, name):\n try:\n return int(params.get(name))\n except:\n return None", "def get_param(self, param):\n return self.params.get(param, None)", "def getParam(self,param):\n if param in self.params.keys():\n return self.params[param]\n else:\n return None", "def get_param(param):\n try:\n return get_param_server().getParam(param)\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")", "def putintparam(self,param_,parvalue_): # 3\n if not isinstance(param_,iparam): raise TypeError(\"Argument param has wrong type\")\n res = self.__obj.putintparam(param_,parvalue_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def param(self, param_nb: int) -> int:\n mode = get_digit_right_to_left(self.modes, param_nb - 1)\n param_index = self.pointer + param_nb\n if mode == 1:\n # immediate mode\n return param_index\n if mode == 2:\n # relative mode\n return self.relative_base + self.program[param_index]\n else:\n # position mode\n return self.program[param_index]", "def get_parameter(self, param):\n try:\n result = self._data[\"queryResult\"][\"parameters\"][param]\n except KeyError:\n result = None\n\n return result", "def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param", "def get_param(self):\n\t\treturn handle_to_object(call_sdk_function('PrlResult_GetParam', self.handle))", "def getInt( self, par, path ):\n\n return self.db.getIntPar( par, path )", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0", "def getParam(key):\n \n if globalParams == {}:\n warning(\"WARNING: runtime parameters not yet initialized\")\n LoadParams(\"_defaults\")\n \n if key in globalParams.keys():\n return globalParams[key]\n else:\n raise ValueError()", "def param(self, *args, **kwargs):\n return self.options.param(*args,**kwargs)", "def get_parameter(cur, par):\n cur.execute(\"SELECT value FROM parameters WHERE par='%s';\" % par)\n return cur.fetchone()[0]", "def _parse_positive_int_param(request, query_params, param_name):\n param = query_params.get(param_name)\n if not param:\n return None\n try:\n param = int(param)\n if param <= 0:\n raise ValueError()\n return param\n except ValueError:\n 
request.respond('query parameter \"%s\" must be integer > 0' % param_name,\n 'text/plain', 400)\n return -1", "def get_param(self, parameter):\n if self.c.get(1).get('data'):\n return self.c.get(1).data.get(parameter, None)\n return None", "def param(self):\n return self._param", "def getInt(self, int: int, int2: int) -> int:\n ...", "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def get_param_id(self, param_name, syselem):\n\n with self.__connection.cursor() as cursor:\n query = \"SELECT PID FROM %s WHERE NAME= '%s' AND SYSTEM_ELEMENT= '%s'\" % (self.__schema, param_name, syselem)\n cursor.execute(query)\n result = cursor.fetchone()\n return result['PID']", "def gui_get_param(self,param_name):\n return self._tkvars[param_name].get()", "def get_page_arg() -> int:\n page = request.args.get('page', '1')\n try:\n page = int(page)\n except ValueError:\n page = 1\n return page", "def get_param_as_int(self, name, default=None, required=False):\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n val = self._params[name]\n try:\n return int(val)\n except ValueError:\n pass\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')", "def param_num(self, *, include_tp: bool = False, include_gq: bool = False) -> int:\n return self._param_num(self.model, int(include_tp), int(include_gq))", "def get_parameter(self, key):\n return self._params[key]" ]
[ "0.85646707", "0.78389037", "0.7805483", "0.73943967", "0.73424447", "0.72594076", "0.70536864", "0.66357017", "0.6586575", "0.65700495", "0.64918053", "0.64472634", "0.6425993", "0.62868637", "0.625066", "0.6248809", "0.6234398", "0.62195563", "0.61886173", "0.61466604", "0.61426115", "0.6115582", "0.6094909", "0.6038746", "0.60289836", "0.60244733", "0.60098374", "0.59927326", "0.5985235", "0.5973088" ]
0.8217122
1
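The record above covers `Task.getintparam`, which reads back the current value of an integer solver parameter (the counterpart of the `putintparam` call that appears among the negatives). A minimal sketch assuming the official `mosek` package; the parameter queried is just an example:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        # Query the current interior-point iteration limit.
        max_iter = task.getintparam(mosek.iparam.intpnt_max_iterations)
        print("intpnt_max_iterations:", max_iter)
```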
Obtains the number of preallocated constraints in the optimization task. getmaxnumcon(self)
def getmaxnumcon(self): maxnumcon_ = ctypes.c_int32() res = __library__.MSK_XX_getmaxnumcon(self.__nativep,ctypes.byref(maxnumcon_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) maxnumcon_ = maxnumcon_.value _maxnumcon_return_value = maxnumcon_ return (_maxnumcon_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getmaxnumcon(self): # 3\n res,resargs = self.__obj.getmaxnumcon()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumcon_return_value = resargs\n return _maxnumcon_return_value", "def getmaxnumcone(self): # 3\n res,resargs = self.__obj.getmaxnumcone()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumcone_return_value = resargs\n return _maxnumcone_return_value", "def getNumConstraints(self):\n return _libsbml.Model_getNumConstraints(self)", "def getmaxnumcone(self):\n maxnumcone_ = ctypes.c_int32()\n res = __library__.MSK_XX_getmaxnumcone(self.__nativep,ctypes.byref(maxnumcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumcone_ = maxnumcone_.value\n _maxnumcone_return_value = maxnumcone_\n return (_maxnumcone_return_value)", "def maximum_number_of_workers(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def getmaxnumvar(self): # 3\n res,resargs = self.__obj.getmaxnumvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumvar_return_value = resargs\n return _maxnumvar_return_value", "def number_of_constraints(self):\n return len(self.constraints)", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def control_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count_max or 0)", "def no_of_constraints(self):\n return np.sum(self._no_of_constraints_by_object())", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def getmaxnumbarvar(self): # 3\n res,resargs = self.__obj.getmaxnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumbarvar_return_value = resargs\n return _maxnumbarvar_return_value", "def getnumcon(self): # 3\n res,resargs = self.__obj.getnumcon()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numcon_return_value = resargs\n return _numcon_return_value", "def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']", "def num_conll(self):\n pass", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def maxdim(self):\n return self._maxdim", "def getmaxnumvar(self):\n maxnumvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getmaxnumvar(self.__nativep,ctypes.byref(maxnumvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumvar_ = maxnumvar_.value\n _maxnumvar_return_value = maxnumvar_\n return (_maxnumvar_return_value)", 
"def max_pods_constraint(self) -> Optional[pulumi.Input['MaxPodsConstraintArgs']]:\n return pulumi.get(self, \"max_pods_constraint\")", "def putmaxnumcon(self,maxnumcon_): # 3\n res = self.__obj.putmaxnumcon(maxnumcon_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)", "def numprocesses(self):\n info = self.info()\n return info['max_processes']", "def max_num_neighbors(self):\n return self._max_num_neighbors", "def max_capacity(self) -> int:\n return self._max_capacity", "def max_concurrency(self) -> Optional[int]:\n result = get_feature(self.vm, \"qubes-vm-update-max-concurrency\", None)\n if result is None:\n return result\n return int(result)", "def total_max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_max_node_count\")", "def maxTasksAchievable(self):\n maxTasks = 0\n for w in self._workers:\n maxTasks = maxTasks + w.multitask\n return maxTasks", "def potential_max(self):\n\n return self._args.max" ]
[ "0.759891", "0.6874313", "0.6721544", "0.659825", "0.6556438", "0.65256405", "0.6443037", "0.64323866", "0.6414494", "0.63860065", "0.63830274", "0.63426894", "0.63342726", "0.6323241", "0.63202226", "0.63005996", "0.62932444", "0.62694067", "0.62653077", "0.62133545", "0.61960167", "0.6166631", "0.6159207", "0.6138153", "0.6138132", "0.61246973", "0.61196136", "0.61174357", "0.6110054", "0.6084179" ]
0.7039377
1
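`Task.getmaxnumcon`, documented above, reports how many constraints have been preallocated in the task, which is useful when building a problem incrementally. A short sketch, assuming the standard `mosek` package:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        # Reserve room for constraints up front, then confirm the preallocated amount.
        task.putmaxnumcon(100)
        print("preallocated constraints:", task.getmaxnumcon())
```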
Obtains the length of the name of a semidefinite variable. getbarvarnamelen(self,i_)
def getbarvarnamelen(self,i_): len_ = ctypes.c_int32() res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) len_ = len_.value _len_return_value = len_ return (_len_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def namelength(self):\n return self[\"namelength\"]", "def getlenbarvarj(self,j_): # 3\n res,resargs = self.__obj.getlenbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _lenbarvarj_return_value = resargs\n return _lenbarvarj_return_value", "def getconenamelen(self,i_): # 3\n res,resargs = self.__obj.getconenamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getlenbarvarj(self,j_):\n lenbarvarj_ = ctypes.c_int64()\n res = __library__.MSK_XX_getlenbarvarj(self.__nativep,j_,ctypes.byref(lenbarvarj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n lenbarvarj_ = lenbarvarj_.value\n _lenbarvarj_return_value = lenbarvarj_\n return (_lenbarvarj_return_value)", "def nvar(self):\n return len(self.__vars)", "def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def length_name(self):\n return self._src_decoder.length_tensor_name", "def getconenamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconenamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = 
arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def __len__(self) -> int:\n return len(self.variables)", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def get_var_nbytes(self, var_name):\n return self.get_value_ref(var_name).nbytes", "def size(self, varname):\n if self.handle == None: return []\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return []\n \n def dimlen(d):\n dim = self.handle.dimensions[d]\n if dim != None:\n t = type(dim).__name__\n if t == 'int':\n return dim\n return len(dim)\n return 0\n return map(lambda d: dimlen(d), var.dimensions)", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def nvar(self):\n return len(self.v)", "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarname(self,j_):\n sizename_ = (1 + self.getvarnamelen((j_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getvarname(self.__nativep,j_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getconnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getnumbarvar(self): # 3\n res,resargs = self.__obj.getnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numbarvar_return_value = resargs\n return _numbarvar_return_value", "def __len__(self):\n return len(self._varvals)", "def num_vars(self):\n return len(self.bounds.lb)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def length(self):\n\t\treturn self.n" ]
[ "0.906098", "0.8285865", "0.80473113", "0.757455", "0.7460946", "0.68837076", "0.68781674", "0.6619891", "0.65064466", "0.6430593", "0.61780226", "0.6132598", "0.6126362", "0.61090654", "0.60909855", "0.60322", "0.597303", "0.59556276", "0.5942991", "0.5911243", "0.59078074", "0.5867095", "0.58635956", "0.5848426", "0.58124405", "0.57977766", "0.5785533", "0.5717685", "0.570815", "0.5688925" ]
0.8887873
1
Obtains the name of a semidefinite variable. getbarvarname(self,i_)
def getbarvarname(self,i_): sizename_ = (1 + self.getbarvarnamelen((i_))) name_ = (ctypes.c_char * (sizename_))() res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _name_retval = name_.value.decode("utf-8",errors="replace") return (_name_retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def get_variable_name(self, variable_index):\n return self.variable_names[variable_index - 1]", "def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def putbarvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putbarvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def retrieve_name(self, var):\r\n\t\tfor fi in reversed(inspect.stack()):\r\n\t\t\tnames = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\r\n\t\t\tif len(names) > 0:\r\n\t\t\t\treturn names[0]\r\n\t\treturn \"<unknown>\"", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def varname(p):\n for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:\n m = re.search(r'\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)\n if m:\n return m.group(1)", "def labelit(self, varname):\n \n if not varname:\n return \"\"\n return self.vardict[varname].VariableLabel or varname", "def name_at_position(self, i: int) -> str:\n return self.names[i]", "def name(self):\n return '{} {} 
{}'.format(self.var_period, self.var_type,\n self.var_detail)", "def getvarname(self,j_):\n sizename_ = (1 + self.getvarnamelen((j_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getvarname(self.__nativep,j_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def retrieve_name(var):\n for fi in reversed(inspect.stack()):\n names = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\n if len(names) > 0:\n return names[0]", "def getName(self):\n dataDict = self.__dict__\n result = self.varName\n if result is None:\n result = self.chemComp.name\n return result", "def name_at_position(self, i: int) -> str:\n upper = len(self.names) - 1\n if not 0 <= i <= upper:\n raise ValueError(f'Column index must be between 0 and {upper:d}, inclusive')\n return self.names[i]", "def _get_histname(self, plot, var, frame):\n return '_'.join([plot, var, frame])", "def _get_name(var):\n lcls = inspect.stack()[2][0].f_locals\n for name in lcls:\n if id(var) == id(lcls[name]):\n return name\n return None", "def varname(self) -> str:\n var = getattr(self.parent, \"varname\", None) if self.parent else None\n if var:\n return f\"{var}.{self.name}\"\n else:\n if self.name is None:\n raise SerdeError(\"Field name is None.\")\n return self.name", "def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def getName(self, index) -> Str:\n ...", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return var.op.name", "def build_stkvar_name(*args):\n return _ida_frame.build_stkvar_name(*args)", "def get_variable_name(uuid: UUID, variable_index: int) -> Optional[str]:\n scenario: Optional[AoE2DEScenario] = store.get_scenario(uuid)\n if scenario:\n if gv := get_game_version(uuid) == \"DE\":\n variable = scenario.trigger_manager.get_variable(variable_index)\n if variable:\n return variable.name\n elif 0 <= variable_index <= 255:\n return f\"Variable {variable_index}\"\n else:\n raise ValueError(f\"Scenarios with the game version: {gv} do not support variables.\")\n return None", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def variable_string(self, name):\n return \"$(\" + name + \")\"", "def getAtomName(self, iAtom):\n atomNames = self.getAtomNames()\n return atomNames[iAtom]" ]
[ "0.83525527", "0.7066876", "0.70244217", "0.6815771", "0.6767695", "0.6751615", "0.6699136", "0.6654075", "0.660367", "0.6567873", "0.65562636", "0.652688", "0.63431066", "0.6170237", "0.61540467", "0.6131156", "0.5982232", "0.59685725", "0.592272", "0.59037554", "0.5895002", "0.58925927", "0.5853056", "0.5846602", "0.58019036", "0.57799923", "0.57513803", "0.57187146", "0.5620404", "0.559909" ]
0.8312654
1
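The two records above document `Task.getbarvarnamelen` and `Task.getbarvarname`; in the Python wrapper the latter already returns the decoded string, so the length call is mainly useful for preallocating buffers in the lower-level APIs. A combined sketch, assuming the standard `mosek` package:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        task.appendbarvars([3])          # one 3x3 semidefinite variable
        task.putbarvarname(0, "X0")
        print(task.getbarvarnamelen(0))  # 2
        print(task.getbarvarname(0))     # "X0"
```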
Obtains the index of a semidefinite variable from its name. getbarvarnameindex(self,somename_)
def getbarvarnameindex(self,somename_): if isinstance(somename_,unicode): somename_ = somename_.encode("utf-8",errors="replace") asgn_ = ctypes.c_int32() index_ = ctypes.c_int32() res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) asgn_ = asgn_.value _asgn_return_value = asgn_ index_ = index_.value _index_return_value = index_ return (_asgn_return_value,_index_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def index(self, varname):\n if not isinstance(varname, str):\n raise TypeError(\"argument must be str\")\n varname = self._find_vars(varname, empty_ok=False, single=True)[0]\n return self._varlist.index(varname)", "def getvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def get_variable(self, variable_name):\n assert self.variable_name_to_index is not None\n return self.variable_name_to_index[variable_name]", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_variable_name(self, variable_index):\n return self.variable_names[variable_index - 1]", "def get_indexname(self,index):\n if index in self.indexname2index:\n return index\n else:\n for name,i in self.indexname2index.items():\n if self.index(index)==i:\n return name\n return None", "def _returnSplitIndex(self, var, pltIndex, ind):\n\n firstSplit = self.__splitVariableNames(var, (pltIndex, ind))\n if firstSplit[2].strip() not in self.sourceData[pltIndex].getVars(firstSplit[1].lower()):\n self.raiseAnError(IOError, f'Variable \"{firstSplit[2]}\" not found in \"{firstSplit[1]}\" of DataObject \"{self.sourceData[pltIndex].name}\"!')\n split = firstSplit[2]\n\n return split", "def indexOf(self, aName):\n if aName in self.subroutineTable:\n tup = self.subroutineTable[aName]\n elif aName in self.classTable:\n tup = self.classTable[aName]\n else:\n return None\n\n # DEBUG\n if self.DEBUG:\n print(\"DEBUG(SymbolTable): INDEX OF '{}': {}\".format(aName, tup[2]))\n\n # Extract the index from the tuple\n return tup[2]", "def index(self, factor_name):\n return 
self._factor_names.index(str(factor_name))", "def getNameIndex(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n index = int(name[location:])\n except Exception:\n index = 0\n return index", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def varname(p):\n for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:\n m = re.search(r'\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)\n if m:\n return m.group(1)", "def getconenameindex(self,somename_): # 3\n res,resargs = self.__obj.getconenameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def getSolRatioVarIndx( self, var ):\n \n self.updateAdb( )\n\n if var in self.solNames:\n return self.solNames[ var ]\n elif var in self.solNames.values():\n return var\n else:\n return -1", "def putbarvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putbarvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_tkinter_index(self,index):\n if isinstance(index,str):\n if index in self.indexname2index:\n i=self.indexname2index[index]\n else:\n # pass through tkinter to get 'end' etc converted to index\n i=self.index(index)\n else:\n i=index\n return i", "def getOhcNameIndx( self, name ):\n \n if not self.ohcNames:\n self.getOhcNames( )\n\n if name in self.ohcNames:\n return self.ohcNames[ name ]\n elif name in self.ohcNames.values():\n return name\n else:\n return -1", "def getOqiNameIndx( self, name ):\n \n if not self.oqiNames:\n self.getOqiNames( )\n\n if name in self.oqiNames:\n return self.oqiNames[ name ]\n elif name in self.oqiNames.values():\n return name\n else:\n return -1", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def axis_index(self, key):\n for i, name in enumerate(self.axis_labels):\n if name == key:\n return i\n raise ValueError(f'Axis not found: {key}')", "def getName(self, index) -> Str:\n ...", "def getconnameindex(self,somename_): # 3\n res,resargs = self.__obj.getconnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def get_label(self, index, key=\"Name\"):\n return eval(self.names[key][index])", "def getOthNameIndx( self, name ):\n \n if not self.othNames:\n self.getOthNames( )\n\n if name in self.othNames:\n return self.othNames[ name ]\n elif name in self.othNames.values():\n return name\n else:\n return -1", "def get_index_by_name(self, name):\r\n for i in range(len(self.vertices)):\r\n if self.vertices[i].name == name:\r\n return i\r\n raise ValueError('Reverse look up of name failed.')", "def fromLabel(name):\n return Data.labels.index(name)" ]
[ "0.87919295", "0.77998066", "0.758929", "0.73232466", "0.72942966", "0.70397556", "0.68763274", "0.6527494", "0.63246256", "0.6289317", "0.62172043", "0.6201318", "0.6189726", "0.6124115", "0.61120933", "0.61010575", "0.6051296", "0.6038391", "0.60204947", "0.5998834", "0.5993513", "0.59837496", "0.5982089", "0.598158", "0.59755677", "0.5968575", "0.59400934", "0.5931184", "0.5905522", "0.5885698" ]
0.8470228
1
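`Task.getbarvarnameindex`, documented above, performs the reverse lookup: given a name it returns a pair `(asgn, index)`, where a nonzero `asgn` indicates that the name is actually assigned to a semidefinite variable. A minimal sketch under the same assumptions as the previous examples:

```python
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        task.appendbarvars([3])
        task.putbarvarname(0, "X0")
        asgn, index = task.getbarvarnameindex("X0")
        print(asgn, index)  # asgn != 0, index == 0
```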
Generates systematic names for variables. generatevarnames(self,subj_,fmt_,dims_,sp_)
def generatevarnames(self,subj_,fmt_,dims_,sp_): num_ = None if num_ is None: num_ = len(subj_) elif num_ != len(subj_): raise IndexError("Inconsistent length of array subj") if subj_ is None: raise ValueError("Argument subj cannot be None") if subj_ is None: raise ValueError("Argument subj may not be None") if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous: _subj_copyarray = False _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subj_ is not None: _subj_copyarray = True _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32)) _subj_np_tmp[:] = subj_ assert _subj_np_tmp.flags.contiguous _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subj_copyarray = False _subj_tmp = None if isinstance(fmt_,unicode): fmt_ = fmt_.encode("utf-8",errors="replace") ndims_ = None if ndims_ is None: ndims_ = len(dims_) elif ndims_ != len(dims_): raise IndexError("Inconsistent length of array dims") if dims_ is None: raise ValueError("Argument dims cannot be None") if dims_ is None: raise ValueError("Argument dims may not be None") if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous: _dims_copyarray = False _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif dims_ is not None: _dims_copyarray = True _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32)) _dims_np_tmp[:] = dims_ assert _dims_np_tmp.flags.contiguous _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _dims_copyarray = False _dims_tmp = None _sp_minlength = (num_) if (num_) > 0 and sp_ is not None and len(sp_) != (num_): raise ValueError("Array argument sp is not long enough: Is %d, expected %d" % (len(sp_),(num_))) if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous: _sp_copyarray = False _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif sp_ is not None: _sp_copyarray = True _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64)) _sp_np_tmp[:] = sp_ assert _sp_np_tmp.flags.contiguous _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _sp_copyarray = False _sp_tmp = None res = __library__.MSK_XX_generatevarnames(self.__nativep,num_,_subj_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)", "def vars(svars):\n return np.array([pm.var(var) for var in svars.split()])", "def generateconnames(self,subi_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subi_)\n elif num_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generateconnames(self.__nativep,num_,_subi_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def build_stkvar_name(*args):\n return _ida_frame.build_stkvar_name(*args)", "def _generate_var_name(prefix, field_name):\n return (\n \"_\".join((prefix, field_name)).upper()\n if prefix\n else field_name.upper()\n )", "def put_var_names(self, var_type, num_vars, var_names):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not 
recognized\".format(var_type))\n # var names must all be of same length due to Fortran restrictions\n var_names = [\"{0:{1}s}\".format(x, MAX_STR_LENGTH)[:MAX_STR_LENGTH]\n for x in var_names]\n ierr = exolib.py_expvan(self.exoid, var_type.lower(), var_names)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var names\")", "def get_name():\n return \"SVMd+\"", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def generate_name(syllables):\n name = []\n for x in range(0, syllables):\n name.extend(generate_syllable())\n return ''.join(name).title()", "def _(self, node: Name):\n if node.name not in self.var_map:\n self.var_map.append(node.name)\n\n idx = self.var_map.index(node.name)\n\n return f\"Var{idx}\"", "def labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout", "def gen_name():\n return choice(globals()[choice(['oc_males', 'oc_females'])]) + ' ' + choice(na_surnames)", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def generateconenames(self,subk_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subk_)\n elif num_ != len(subk_):\n raise IndexError(\"Inconsistent length of array subk\")\n if subk_ is None:\n raise ValueError(\"Argument subk cannot be None\")\n if subk_ is None:\n raise ValueError(\"Argument subk may not be None\")\n if isinstance(subk_, numpy.ndarray) and subk_.dtype is numpy.dtype(numpy.int32) and subk_.flags.contiguous:\n _subk_copyarray = False\n _subk_tmp = ctypes.cast(subk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subk_ is not None:\n _subk_copyarray = True\n _subk_np_tmp = numpy.zeros(len(subk_),numpy.dtype(numpy.int32))\n _subk_np_tmp[:] = subk_\n assert _subk_np_tmp.flags.contiguous\n _subk_tmp = ctypes.cast(_subk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subk_copyarray = False\n _subk_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n 
if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generateconenames(self.__nativep,num_,_subk_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_name():\n return \"SVMd+ - simplified approach\"", "def _create_parameter_names(self):\n self._parameter_names = self.parameter_schema[\"problem\"][\"names\"]", "def _create_parameter_names(self):\n self._parameter_names = [key for key in self.parameter_schema.keys() if key != 'num_simulations']", "def main():\n var_name = prompt_for_var_name()\n var_words = normalize_var_name(var_name)\n for case in CASES:\n out_var_name = render_case(var_words, case)\n print(out_var_name)", "def _(self, node: Var):\n if node.val.name not in self.var_map:\n self.var_map.append(node.val.name)\n\n idx = self.var_map.index(node.val.name)\n return f\"Var{idx}\"", "def _var_name_generator():\n count = itertools.count()\n while True:\n yield '_var_' + str(count.next())", "def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )", "def scalar_name(m1, m2):\n root = \"/Users/atlytle/Dropbox/pycode/tifr/data/\"\n return root + \"HOscalar_l2464_m{0}_m{1}.npy\".format(m1, m2)", "def _get_dataset_names(ds, field):\n\n dsf = ds[field]\n\n standard_name, long_name, var_name = None, None, field\n long_name = _get_dataset_attr(dsf, 'long_name')\n standard_name = _get_dataset_attr(dsf, 'standard_name')\n\n return standard_name, long_name, var_name", "def generate_name():\n return random.choice(ADJECTIVES) + \"_\" + random.choice(TOOLS)", "def varname_generator(id_name, code_name, code_range):\n # generate variable names according to the ones mentioned in the respective dataset coding report\n select_vars = [id_name]\n for i in range(1, (code_range + 1)):\n # creates a varname and adds an integer to it\n var_name = code_name + '{!s}'.format(i)\n select_vars.append(var_name)\n # finally, return the varname list\n return select_vars", "def _get_histname(self, plot, var, frame):\n return '_'.join([plot, var, frame])", "def _get_name(self):\n name_string = '%s_%s' % (\n self.parameter_type,\n self.parameter_value_type)\n if self.paneltemplateparametermarker_set.count() > 0:\n marker_string = \"_\".join(sorted([m.marker.marker_abbreviation for m in self.paneltemplateparametermarker_set.all()]))\n name_string += '_' + marker_string\n if self.fluorochrome:\n name_string += '_' + self.fluorochrome.fluorochrome_abbreviation\n return name_string", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in global_op_namespace:\n global_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n global_op_namespace[t] += 1\n suffix = f\"{global_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in global_var_namespace:\n 
new_name = _gen(op_type)\n\n global_var_namespace.add(new_name)\n return new_name" ]
[ "0.6139157", "0.5973622", "0.5816728", "0.5790525", "0.57094544", "0.55101526", "0.5501698", "0.54202396", "0.5395023", "0.5386658", "0.5385736", "0.5382898", "0.5350833", "0.5279343", "0.5254986", "0.52490866", "0.5238918", "0.5237092", "0.523355", "0.5229274", "0.51856", "0.51614726", "0.51358855", "0.512054", "0.51177984", "0.5117594", "0.5116451", "0.5110454", "0.5100621", "0.5097198" ]
0.7959128
0
Generates systematic names for constraints. generateconnames(self,subi_,fmt_,dims_,sp_)
def generateconnames(self,subi_,fmt_,dims_,sp_): num_ = None if num_ is None: num_ = len(subi_) elif num_ != len(subi_): raise IndexError("Inconsistent length of array subi") if subi_ is None: raise ValueError("Argument subi cannot be None") if subi_ is None: raise ValueError("Argument subi may not be None") if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous: _subi_copyarray = False _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subi_ is not None: _subi_copyarray = True _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32)) _subi_np_tmp[:] = subi_ assert _subi_np_tmp.flags.contiguous _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subi_copyarray = False _subi_tmp = None if isinstance(fmt_,unicode): fmt_ = fmt_.encode("utf-8",errors="replace") ndims_ = None if ndims_ is None: ndims_ = len(dims_) elif ndims_ != len(dims_): raise IndexError("Inconsistent length of array dims") if dims_ is None: raise ValueError("Argument dims cannot be None") if dims_ is None: raise ValueError("Argument dims may not be None") if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous: _dims_copyarray = False _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif dims_ is not None: _dims_copyarray = True _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32)) _dims_np_tmp[:] = dims_ assert _dims_np_tmp.flags.contiguous _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _dims_copyarray = False _dims_tmp = None _sp_minlength = (num_) if (num_) > 0 and sp_ is not None and len(sp_) != (num_): raise ValueError("Array argument sp is not long enough: Is %d, expected %d" % (len(sp_),(num_))) if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous: _sp_copyarray = False _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif sp_ is not None: _sp_copyarray = True _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64)) _sp_np_tmp[:] = sp_ assert _sp_np_tmp.flags.contiguous _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _sp_copyarray = False _sp_tmp = None res = __library__.MSK_XX_generateconnames(self.__nativep,num_,_subi_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateconenames(self,subk_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subk_)\n elif num_ != len(subk_):\n raise IndexError(\"Inconsistent length of array subk\")\n if subk_ is None:\n raise ValueError(\"Argument subk cannot be None\")\n if subk_ is None:\n raise ValueError(\"Argument subk may not be None\")\n if isinstance(subk_, numpy.ndarray) and subk_.dtype is numpy.dtype(numpy.int32) and subk_.flags.contiguous:\n _subk_copyarray = False\n _subk_tmp = ctypes.cast(subk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subk_ is not None:\n _subk_copyarray = True\n _subk_np_tmp = numpy.zeros(len(subk_),numpy.dtype(numpy.int32))\n _subk_np_tmp[:] = subk_\n assert _subk_np_tmp.flags.contiguous\n _subk_tmp = ctypes.cast(_subk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subk_copyarray = False\n _subk_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generateconenames(self.__nativep,num_,_subk_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def generatevarnames(self,subj_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n 
else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generatevarnames(self.__nativep,num_,_subj_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def generate_name(syllables):\n name = []\n for x in range(0, syllables):\n name.extend(generate_syllable())\n return ''.join(name).title()", "def labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout", "def getconname(self,i_): # 3\n sizename_ = (1 + self.getconnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconename(self,i_):\n sizename_ = (1 + self.getconenamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconename(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n 
memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconname(self,i_):\n sizename_ = (1 + self.getconnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \\\n r\"\\hat{E}\", r\"g_1\"", "def seq_name(seq):\n if len(seq) == 1:\n return cp_name(seq[0])\n return 'u' + '_'.join('%04X' % cp for cp in seq)", "def init_name(self):\r\n try:\r\n rval = self.name\r\n except AttributeError:\r\n if 0:\r\n l = []\r\n for n in self.fgraph.toposort():\r\n if hasattr(n.op, \"name\") and n.op.name is not None:\r\n v = n.op.name\r\n if v.startswith(\"Composite\"):\r\n v = v[len(\"Composite\"):]\r\n else:\r\n v = n.op.__class__.__name__\r\n l.append(v)\r\n rval = \"Composite{\" + \",\".join(l) + \"}\"\r\n else:\r\n for i, r in enumerate(self.fgraph.inputs):\r\n r.name = 'i%i' % i\r\n for i, r in enumerate(self.fgraph.outputs):\r\n r.name = 'o%i' % i\r\n io = set(self.fgraph.inputs + self.fgraph.outputs)\r\n for i, r in enumerate(self.fgraph.variables):\r\n if r not in io and len(r.clients) > 1:\r\n r.name = 't%i' % i\r\n rval = \"Composite{%s}\" % str(self.fgraph)\r\n self.name = rval", "def _build_name(self):\n self.ctrl_name = NameUtils.get_unique_name(self.asset,\n self.side,\n self.part,\n \"ctrl\")", "def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def addConstrs(self, constrs, name=''):\n ...", "def putconname(self,i_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconname(self.__nativep,i_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __repr__(self):\n return \"CSP(\"+str(self.domains)+\", \"+str([str(c) for c in self.constraints])+\")\"", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def recreate_subgraphs_name():\n global SUBGRAPHS\n for (name, subgraph) in SUBGRAPHS.items():\n subgraph.set_name(\"\\\"cluster_\" + subgraph.get_name() + \"\\\"\")", "def name(self):\n\t\t# This is necessary for 
ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def _build_fname_templ(n):\n parts =[globals.ds_fn_templ.format(i='{i_ref:d}', ds='{ref}', var='{ref_var}')]\n for i in range(1, n):\n parts += [globals.ds_fn_templ.format(i='{i_ds%i:d}' % i, ds='{ds%i}' % i,\n var='{var%i}' % i)]\n return globals.ds_fn_sep.join(parts) + '.nc'", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def _generate_expanded_column_names(self):\n\n names = []\n # Get names of the descriptors\n des_names = [column for column in self.descriptor_dataframe][1:]\n\n # Generate expanded descriptor names for each compound\n for i in range(self.total_compounds):\n for des_name in des_names:\n name = 'compund_{}_{}'.format(i, des_name)\n names.append(name)\n\n return names", "def _subconstituent_name(h):\n if h == 1:\n o = \"1st\"\n elif h == 2:\n o = \"2nd\"\n elif h == 3:\n o = \"3rd\"\n else:\n o = \"%dth\" % h\n return \"%s subconstituent\" % o", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def _create_parameter_names(self):\n self._parameter_names = self.parameter_schema[\"problem\"][\"names\"]", "def name_corr(self):\n raise NotImplementedError\n ## Build matrix of names\n corr_mat = []\n for ind in range(self.n_in):\n corr_mat.append(\n list(map(lambda s: s + \",\" + self.domain.var[ind], self.domain.var))\n )\n\n ## Access matrix of names\n corr_names = dict()\n corr_ind = triu_indices(self.n_in, 1)\n for knd in range(len(corr_ind[0])):\n ind = corr_ind[0][knd]\n jnd = corr_ind[1][knd]\n corr_names[\"corr_\" + str(knd)] = corr_mat[ind][jnd]\n\n return corr_names", "def new_constraint_name(self, column, type):\n name = self.name.lstrip('migrate_')[:30]\n if type == 'UNIQUE':\n return '{}_{}_{}_uniq'.format(name, column[:15], self._random_string(8))\n elif type == 'PRIMARY KEY':\n return '{}_{}_pkey'.format(name, self._random_string(4))\n else:\n raise NotImplementedError('Name not implemented for type {}'.format(type))", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label" ]
[ "0.71432316", "0.6309858", "0.57458603", "0.5728091", "0.5542116", "0.5457417", "0.54459995", "0.5413401", "0.5405197", "0.5331198", "0.5301801", "0.5269671", "0.526506", "0.52508026", "0.5176214", "0.5176144", "0.51522493", "0.5143848", "0.50646645", "0.50584084", "0.505461", "0.5048936", "0.49997544", "0.4977249", "0.4975671", "0.4974384", "0.49604258", "0.49600518", "0.49191839", "0.4908742" ]
0.80321515
0
Generates systematic names for cone. generateconenames(self,subk_,fmt_,dims_,sp_)
def generateconenames(self,subk_,fmt_,dims_,sp_): num_ = None if num_ is None: num_ = len(subk_) elif num_ != len(subk_): raise IndexError("Inconsistent length of array subk") if subk_ is None: raise ValueError("Argument subk cannot be None") if subk_ is None: raise ValueError("Argument subk may not be None") if isinstance(subk_, numpy.ndarray) and subk_.dtype is numpy.dtype(numpy.int32) and subk_.flags.contiguous: _subk_copyarray = False _subk_tmp = ctypes.cast(subk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif subk_ is not None: _subk_copyarray = True _subk_np_tmp = numpy.zeros(len(subk_),numpy.dtype(numpy.int32)) _subk_np_tmp[:] = subk_ assert _subk_np_tmp.flags.contiguous _subk_tmp = ctypes.cast(_subk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _subk_copyarray = False _subk_tmp = None if isinstance(fmt_,unicode): fmt_ = fmt_.encode("utf-8",errors="replace") ndims_ = None if ndims_ is None: ndims_ = len(dims_) elif ndims_ != len(dims_): raise IndexError("Inconsistent length of array dims") if dims_ is None: raise ValueError("Argument dims cannot be None") if dims_ is None: raise ValueError("Argument dims may not be None") if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous: _dims_copyarray = False _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif dims_ is not None: _dims_copyarray = True _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32)) _dims_np_tmp[:] = dims_ assert _dims_np_tmp.flags.contiguous _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _dims_copyarray = False _dims_tmp = None _sp_minlength = (num_) if (num_) > 0 and sp_ is not None and len(sp_) != (num_): raise ValueError("Array argument sp is not long enough: Is %d, expected %d" % (len(sp_),(num_))) if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous: _sp_copyarray = False _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) elif sp_ is not None: _sp_copyarray = True _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64)) _sp_np_tmp[:] = sp_ assert _sp_np_tmp.flags.contiguous _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64)) else: _sp_copyarray = False _sp_tmp = None res = __library__.MSK_XX_generateconenames(self.__nativep,num_,_subk_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateconnames(self,subi_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subi_)\n elif num_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generateconnames(self.__nativep,num_,_subi_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _build_name(self):\n self.ctrl_name = NameUtils.get_unique_name(self.asset,\n self.side,\n self.part,\n \"ctrl\")", "def generatevarnames(self,subj_,fmt_,dims_,sp_):\n num_ = None\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert 
_subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if isinstance(fmt_,unicode):\n fmt_ = fmt_.encode(\"utf-8\",errors=\"replace\")\n ndims_ = None\n if ndims_ is None:\n ndims_ = len(dims_)\n elif ndims_ != len(dims_):\n raise IndexError(\"Inconsistent length of array dims\")\n if dims_ is None:\n raise ValueError(\"Argument dims cannot be None\")\n if dims_ is None:\n raise ValueError(\"Argument dims may not be None\")\n if isinstance(dims_, numpy.ndarray) and dims_.dtype is numpy.dtype(numpy.int32) and dims_.flags.contiguous:\n _dims_copyarray = False\n _dims_tmp = ctypes.cast(dims_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif dims_ is not None:\n _dims_copyarray = True\n _dims_np_tmp = numpy.zeros(len(dims_),numpy.dtype(numpy.int32))\n _dims_np_tmp[:] = dims_\n assert _dims_np_tmp.flags.contiguous\n _dims_tmp = ctypes.cast(_dims_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _dims_copyarray = False\n _dims_tmp = None\n \n _sp_minlength = (num_)\n if (num_) > 0 and sp_ is not None and len(sp_) != (num_):\n raise ValueError(\"Array argument sp is not long enough: Is %d, expected %d\" % (len(sp_),(num_)))\n if isinstance(sp_, numpy.ndarray) and sp_.dtype is numpy.dtype(numpy.int64) and sp_.flags.contiguous:\n _sp_copyarray = False\n _sp_tmp = ctypes.cast(sp_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sp_ is not None:\n _sp_copyarray = True\n _sp_np_tmp = numpy.zeros(len(sp_),numpy.dtype(numpy.int64))\n _sp_np_tmp[:] = sp_\n assert _sp_np_tmp.flags.contiguous\n _sp_tmp = ctypes.cast(_sp_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sp_copyarray = False\n _sp_tmp = None\n \n res = __library__.MSK_XX_generatevarnames(self.__nativep,num_,_subj_tmp,fmt_,ndims_,_dims_tmp,_sp_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def generate_name(syllables):\n name = []\n for x in range(0, syllables):\n name.extend(generate_syllable())\n return ''.join(name).title()", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def recreate_subgraphs_name():\n global SUBGRAPHS\n for (name, subgraph) in SUBGRAPHS.items():\n subgraph.set_name(\"\\\"cluster_\" + subgraph.get_name() + \"\\\"\")", "def getconename(self,i_):\n sizename_ = (1 + self.getconenamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconename(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def init_name(self):\r\n try:\r\n rval = self.name\r\n except AttributeError:\r\n if 0:\r\n l = []\r\n for n in self.fgraph.toposort():\r\n if hasattr(n.op, \"name\") and n.op.name is not None:\r\n v = n.op.name\r\n if v.startswith(\"Composite\"):\r\n v = v[len(\"Composite\"):]\r\n else:\r\n v = n.op.__class__.__name__\r\n l.append(v)\r\n rval = \"Composite{\" + \",\".join(l) + 
\"}\"\r\n else:\r\n for i, r in enumerate(self.fgraph.inputs):\r\n r.name = 'i%i' % i\r\n for i, r in enumerate(self.fgraph.outputs):\r\n r.name = 'o%i' % i\r\n io = set(self.fgraph.inputs + self.fgraph.outputs)\r\n for i, r in enumerate(self.fgraph.variables):\r\n if r not in io and len(r.clients) > 1:\r\n r.name = 't%i' % i\r\n rval = \"Composite{%s}\" % str(self.fgraph)\r\n self.name = rval", "def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )", "def use_name(self):\n projection = proj.get_projection(self.conf.projections, self.spec)\n if not projection:\n projection = self.conf.default_projections[\"all\"]\n\n name = self.spec.format(projection)\n # Not everybody is working on linux...\n parts = name.split(\"/\")\n name = os.path.join(*parts)\n # Add optional suffixes based on constraints\n path_elements = [name] + self.conf.suffixes\n return \"-\".join(path_elements)", "def genPathCopasi(self,nameBase,suffix=\".cps\"):\n i=0\n nameFree=False\n while not nameFree:\n copasi_filename = os.path.join(self.run_dir,nameBase+\n str(i)+suffix)\n nameFree = not os.path.exists(copasi_filename)\n i=i+1\n return copasi_filename", "def _subconstituent_name(h):\n if h == 1:\n o = \"1st\"\n elif h == 2:\n o = \"2nd\"\n elif h == 3:\n o = \"3rd\"\n else:\n o = \"%dth\" % h\n return \"%s subconstituent\" % o", "def outname(self,sctn):\n return '%s_%s.html'%(self.basename,sctn)", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconname(self,i_):\n sizename_ = (1 + self.getconnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def _configure_image_name(self, ccd_operation_mode,\n include_star_mag=False):\n dic = ccd_operation_mode\n em_gain = '_G' + str(dic['em_gain'])\n em_mode = 'CONV'\n if dic['em_mode'] == 1:\n em_mode = 'EM'\n hss = '_HSS' + str(dic['hss'])\n preamp = '_PA' + str(dic['preamp'])\n binn = '_B' + str(dic['binn'])\n t_exp = '_TEXP' + str(dic['t_exp'])\n self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain\n\n if include_star_mag:\n star_flux = '_S' + str(self.star_magnitude)\n self.image_name += star_flux", "def _build_fname_templ(n):\n parts =[globals.ds_fn_templ.format(i='{i_ref:d}', ds='{ref}', var='{ref_var}')]\n for i in range(1, n):\n parts += [globals.ds_fn_templ.format(i='{i_ds%i:d}' % i, ds='{ds%i}' % i,\n var='{var%i}' % i)]\n return globals.ds_fn_sep.join(parts) + '.nc'", "def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)", "def 
gen_name():\n return choice(globals()[choice(['oc_males', 'oc_females'])]) + ' ' + choice(na_surnames)", "def generate_name():\n return random.choice(ADJECTIVES) + \"_\" + random.choice(TOOLS)", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def getconname(self,i_): # 3\n sizename_ = (1 + self.getconnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def putconname(self,i_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconname(self.__nativep,i_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def cachename_from_args(undirected, supervised, with_authors, collate_coauthorship):\n name = ''\n if undirected:\n name += 'undirected'\n else:\n name += 'directed'\n name += '-'\n if supervised:\n name += 'supervised'\n else:\n name += 'unsupervised'\n name += '-'\n if with_authors:\n if collate_coauthorship:\n name += 'collated-authors'\n else:\n name += 'first-class-authors'\n else:\n name += 'no-authors'\n return name", "def get_name(self):\n return \"catkin\"", "def cc(self, name):\n return \"\".join([n[0].upper() + n[1:] for n in name.split(\".\")])", "def _get_name(self):\n return '%s (%d)' % (\n self.panel_template.panel_name,\n self.implementation)" ]
[ "0.74384165", "0.58503294", "0.5830912", "0.5807586", "0.5752899", "0.5612548", "0.5604318", "0.55963373", "0.5569974", "0.5561796", "0.54910266", "0.54244846", "0.5389508", "0.53837425", "0.53835374", "0.5380686", "0.5340771", "0.5326651", "0.53238434", "0.53188485", "0.53112286", "0.5296712", "0.5284924", "0.5277954", "0.5271112", "0.52602196", "0.52468866", "0.52309656", "0.52287054", "0.5221423" ]
0.7758646
0
Sets the name of a constraint. putconname(self,i_,name_)
def putconname(self,i_,name_): if isinstance(name_,unicode): name_ = name_.encode("utf-8",errors="replace") res = __library__.MSK_XX_putconname(self.__nativep,i_,name_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putconname(self,i_,name_): # 3\n res = self.__obj.putconname(i_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconename(self,j_,name_): # 3\n res = self.__obj.putconename(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconename(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconename(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self,name):\r\n self._name = __name", "def setname(self, name):\n self.__name = name", "def setName(self, name): \n\n self._name = name", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def set_name(self, _name):\n self.name = _name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def setName(self, name):\n self.name = str(name)", "def set_name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name" ]
[ "0.8193894", "0.7109492", "0.6670976", "0.66550237", "0.6557067", "0.6499532", "0.64980155", "0.6484204", "0.6475017", "0.6475017", "0.64716566", "0.64716566", "0.64716566", "0.64716566", "0.64716566", "0.64367193", "0.64367193", "0.64250505", "0.64250505", "0.64250505", "0.64250505", "0.6418103", "0.64104486", "0.64104486", "0.63906306", "0.6368906", "0.6302511", "0.6302511", "0.6302511", "0.6302511" ]
0.7829893
1
Sets the name of a variable. putvarname(self,j_,name_)
def putvarname(self,j_,name_): if isinstance(name_,unicode): name_ = name_.encode("utf-8",errors="replace") res = __library__.MSK_XX_putvarname(self.__nativep,j_,name_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putvarname(self,j_,name_): # 3\n res = self.__obj.putvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putbarvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putbarvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def setVarName(self, theName):\n self._name = theName\n return self", "def set_name(self,name):\r\n self._name = __name", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def put_node_variable_name(self, name, index):\n # 1 - based indexing!\n assert index <= self._f.dimensions[\"num_nod_var\"]\n\n self._f.variables[\"name_nod_var\"][index - 1] = b\"\"\n self._f.variables[\"name_nod_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def putconename(self,j_,name_): # 3\n res = self.__obj.putconename(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def variable_name(self, variable_name):\n\n self._variable_name = variable_name", "def variable_name(self, variable_name):\n\n self._variable_name = variable_name", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def put_element_variable_name(self, name, index):\n self._f.variables[\"name_elem_var\"][index - 1] = b\"\"\n self._f.variables[\"name_elem_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def name(self, name):\n self.__name = name", "def putconename(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconename(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def name(self, value):\n self._name = value", "def name(self, value):\n self._name = value", "def setName(self, name):\n self.name = str(name)", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name" ]
[ "0.8883954", "0.7846457", "0.7583609", "0.7371796", "0.6804934", "0.6749491", "0.6718", "0.6678017", "0.6662674", "0.66056114", "0.66056114", "0.6533437", "0.65012705", "0.65012705", "0.6494305", "0.64568096", "0.64568096", "0.6420372", "0.64115626", "0.64104736", "0.6387735", "0.6387735", "0.6373206", "0.6362514", "0.6362514", "0.6362514", "0.6362514", "0.6362514", "0.6362514", "0.6362514" ]
0.8425244
1
Sets the name of a cone. putconename(self,j_,name_)
def putconename(self,j_,name_): if isinstance(name_,unicode): name_ = name_.encode("utf-8",errors="replace") res = __library__.MSK_XX_putconename(self.__nativep,j_,name_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putconename(self,j_,name_): # 3\n res = self.__obj.putconename(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconname(self,i_,name_): # 3\n res = self.__obj.putconname(i_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putconname(self,i_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconname(self.__nativep,i_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)", "def set_name(self,name):\r\n self._name = __name", "def set_name(self, name):\n\t\tself.name_ = name", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\r\n self.__name = name", "def setName(self, name): \n\n self._name = name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def setName(self, name):\n self.name = str(name)", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def new_name(self,new_name):\n self.name = new_name", "def setName(self, *args):\n return _libsbml.Compartment_setName(self, *args)", "def putvarname(self,j_,name_): # 3\n res = self.__obj.putvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def set_name(self, newname=\"\"):\n self.name = newname" ]
[ "0.8515966", "0.77436495", "0.7510013", "0.6582177", "0.6530854", "0.6465643", "0.64578557", "0.64150274", "0.63634545", "0.63573754", "0.63573754", "0.6346879", "0.6346033", "0.6333818", "0.6333818", "0.6333818", "0.6333818", "0.6326524", "0.6326524", "0.6326524", "0.6326524", "0.6326524", "0.6297006", "0.6297006", "0.62947184", "0.62947184", "0.6271246", "0.6261718", "0.62590945", "0.6257631" ]
0.7865969
1
Sets the name of a semidefinite variable. putbarvarname(self,j_,name_)
def putbarvarname(self,j_,name_): if isinstance(name_,unicode): name_ = name_.encode("utf-8",errors="replace") res = __library__.MSK_XX_putbarvarname(self.__nativep,j_,name_) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarname(self,j_,name_): # 3\n res = self.__obj.putvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def put_element_variable_name(self, name, index):\n self._f.variables[\"name_elem_var\"][index - 1] = b\"\"\n self._f.variables[\"name_elem_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def put_global_variable_name(self, name, index):\n self._f.variables[\"name_glo_var\"][index - 1] = b\"\"\n self._f.variables[\"name_glo_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def setVarName(self, theName):\n self._name = theName\n return self", "def put_node_variable_name(self, name, index):\n # 1 - based indexing!\n assert index <= self._f.dimensions[\"num_nod_var\"]\n\n self._f.variables[\"name_nod_var\"][index - 1] = b\"\"\n self._f.variables[\"name_nod_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def putconename(self,j_,name_): # 3\n res = self.__obj.putconename(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def set_name(self,name):\r\n self._name = __name", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def putconename(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconename(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return 
(_name_retval)", "def variable_name(self, variable_name):\n\n self._variable_name = variable_name", "def variable_name(self, variable_name):\n\n self._variable_name = variable_name", "def var_name ( self , name ) :\n if name in self.__var_names and not NameDuplicates.allowed() :\n self.warning ( 'The variable name \"%s\" is already defined!' % name )\n \n self.__var_names.add ( name )\n self.__local_names.add ( name )\n return name", "def set_name(self, name):\r\n self.__name = name", "def name(self, name):\n self.__name = name", "def setName(self, name):\n self.name = str(name)", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def setName(self, *args):\n return _libsbml.InSpeciesTypeBond_setName(self, *args)", "def putbarsj(self,whichsol_,j_,barsj): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if barsj is None: raise TypeError(\"Invalid type for argument barsj\")\n if barsj is None:\n barsj_ = None\n else:\n try:\n barsj_ = memoryview(barsj)\n except TypeError:\n try:\n _tmparr_barsj = array.array(\"d\",barsj)\n except TypeError:\n raise TypeError(\"Argument barsj has wrong type\")\n else:\n barsj_ = memoryview(_tmparr_barsj)\n \n else:\n if barsj_.format != \"d\":\n barsj_ = memoryview(array.array(\"d\",barsj))\n \n if barsj_ is not None and len(barsj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barsj has wrong length\")\n res = self.__obj.putbarsj(whichsol_,j_,barsj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def name(self, value):\n self._name = value" ]
[ "0.8725043", "0.78685296", "0.73790705", "0.63185453", "0.62304795", "0.61820215", "0.6175833", "0.60793555", "0.60779697", "0.6025984", "0.5947061", "0.5932117", "0.58367765", "0.58295", "0.58150184", "0.5744822", "0.57223016", "0.5708944", "0.5708944", "0.57083535", "0.56835777", "0.5626449", "0.5606134", "0.5597447", "0.5597447", "0.55844706", "0.55844706", "0.5570596", "0.55700564", "0.5566725" ]
0.83028346
1
Obtains the length of the name of a variable. getvarnamelen(self,i_)
def getvarnamelen(self,i_): len_ = ctypes.c_int32() res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) len_ = len_.value _len_return_value = len_ return (_len_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconenamelen(self,i_): # 3\n res,resargs = self.__obj.getconenamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def namelength(self):\n return self[\"namelength\"]", "def getconenamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconenamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def nvar(self):\n return len(self.__vars)", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def length_name(self):\n return self._src_decoder.length_tensor_name", "def get_var_nbytes(self, var_name):\n return self.get_value_ref(var_name).nbytes", "def tester(name):\n return len(name)", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarname(self,j_):\n sizename_ = (1 + self.getvarnamelen((j_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getvarname(self.__nativep,j_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return 
length", "def getconnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def __len__(self) -> int:\n return len(self.variables)", "def mvarlen():\n\n global offset\n\n x=0L\n for i in range(4):\n\n try:\n byte=ord(midifile[offset])\n offset += 1\n except:\n error(\"Invalid MIDI file include (varlen->int)\")\n\n if byte < 0x80:\n x = ( x << 7 ) + byte\n break\n else:\n x = ( x << 7 ) + ( byte & 0x7f )\n\n return int(x)", "def size(self, varname):\n if self.handle == None: return []\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return []\n \n def dimlen(d):\n dim = self.handle.dimensions[d]\n if dim != None:\n t = type(dim).__name__\n if t == 'int':\n return dim\n return len(dim)\n return 0\n return map(lambda d: dimlen(d), var.dimensions)", "def length(self):\n\t\treturn self.n", "def num_vars(self):\n return self.nvars", "def num_vars(self):\n return self._nvars", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def getlenbarvarj(self,j_): # 3\n res,resargs = self.__obj.getlenbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _lenbarvarj_return_value = resargs\n return _lenbarvarj_return_value", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def namelengthsrc(self):\n return self[\"namelengthsrc\"]", "def getLength(self):\n return self.n", "def getobjnamelen(self): # 3\n res,resargs = self.__obj.getobjnamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def nvar(self):\n return len(self.v)" ]
[ "0.91696215", "0.86523247", "0.83191776", "0.74700165", "0.74440676", "0.6821054", "0.6626158", "0.6521564", "0.64515024", "0.64394104", "0.640624", "0.63913447", "0.6386585", "0.62835014", "0.6280414", "0.6236239", "0.618746", "0.6163219", "0.61378926", "0.6079825", "0.6053449", "0.60354304", "0.6031045", "0.60038364", "0.5992265", "0.59853315", "0.59731513", "0.59631467", "0.59545267", "0.589364" ]
0.8870697
1
Obtains the name of a variable. getvarname(self,j_)
def getvarname(self,j_):
    sizename_ = (1 + self.getvarnamelen((j_)))
    name_ = (ctypes.c_char * (sizename_))()
    res = __library__.MSK_XX_getvarname(self.__nativep,j_,sizename_,name_)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    _name_retval = name_.value.decode("utf-8",errors="replace")
    return (_name_retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def putvarname(self,j_,name_): # 3\n res = self.__obj.putvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def varname(p):\n for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:\n m = re.search(r'\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)\n if m:\n return m.group(1)", "def get_variable_name(self, variable_index):\n return self.variable_names[variable_index - 1]", "def retrieve_name(self, var):\r\n\t\tfor fi in reversed(inspect.stack()):\r\n\t\t\tnames = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\r\n\t\t\tif len(names) > 0:\r\n\t\t\t\treturn names[0]\r\n\t\treturn \"<unknown>\"", "def putbarvarname(self,j_,name_): # 3\n res = self.__obj.putbarvarname(j_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return var.op.name", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getName(self):\n dataDict = self.__dict__\n result = self.varName\n if result is None:\n result = self.chemComp.name\n return result", "def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def _get_name(var):\n lcls = inspect.stack()[2][0].f_locals\n for name in lcls:\n if id(var) == id(lcls[name]):\n return name\n return None", "def varname(self) -> str:\n var = getattr(self.parent, \"varname\", None) if self.parent else None\n if var:\n return f\"{var}.{self.name}\"\n else:\n if self.name is None:\n raise SerdeError(\"Field name is None.\")\n return self.name", "def build_stkvar_name(*args):\n return _ida_frame.build_stkvar_name(*args)", "def retrieve_name(var):\n for fi in 
reversed(inspect.stack()):\n names = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\n if len(names) > 0:\n return names[0]", "def get_variable_name(uuid: UUID, variable_index: int) -> Optional[str]:\n scenario: Optional[AoE2DEScenario] = store.get_scenario(uuid)\n if scenario:\n if gv := get_game_version(uuid) == \"DE\":\n variable = scenario.trigger_manager.get_variable(variable_index)\n if variable:\n return variable.name\n elif 0 <= variable_index <= 255:\n return f\"Variable {variable_index}\"\n else:\n raise ValueError(f\"Scenarios with the game version: {gv} do not support variables.\")\n return None", "def labelit(self, varname):\n \n if not varname:\n return \"\"\n return self.vardict[varname].VariableLabel or varname", "def putbarvarname(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putbarvarname(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)", "def get_variable(self, variable_name):\n assert self.variable_name_to_index is not None\n return self.variable_name_to_index[variable_name]", "def getVariable(self, varName):\n return self[varName]", "def getvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def _get_arg_name(self, arg, variable_name):", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name" ]
[ "0.806976", "0.75603807", "0.7076541", "0.6987677", "0.6888855", "0.6750818", "0.65493554", "0.65462774", "0.6493769", "0.64399004", "0.6420192", "0.6387897", "0.6376338", "0.6375689", "0.6283686", "0.6280662", "0.627458", "0.6198005", "0.61735547", "0.616751", "0.61630327", "0.61474943", "0.6119865", "0.60785156", "0.607319", "0.60665566", "0.60665566", "0.60665566", "0.60665566", "0.60665566" ]
0.8035467
1
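A minimal usage sketch for the variable-name accessors in the record above; it assumes a standard MOSEK Python installation, and the task and names set up here are purely illustrative.

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendvars(2)               # two scalar variables
    task.putvarname(0, "x0")         # attach a name to variable 0
    print(task.getvarnamelen(0))     # length of the stored name
    print(task.getvarname(0))        # prints "x0"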
Obtains the length of the name of a constraint. getconnamelen(self,i_)
def getconnamelen(self,i_):
    len_ = ctypes.c_int32()
    res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    len_ = len_.value
    _len_return_value = len_
    return (_len_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconenamelen(self,i_): # 3\n res,resargs = self.__obj.getconenamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconenamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconenamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconname(self,i_):\n sizename_ = (1 + self.getconnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getconname(self,i_): # 3\n sizename_ = (1 + self.getconnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getobjnamelen(self):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getobjnamelen(self.__nativep,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconename(self,i_):\n sizename_ = (1 + self.getconenamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconename(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getobjnamelen(self): # 3\n res,resargs = self.__obj.getobjnamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def gettasknamelen(self): # 3\n res,resargs = self.__obj.gettasknamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def gettasknamelen(self):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_gettasknamelen(self.__nativep,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n 
result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def namelength(self):\n return self[\"namelength\"]", "def _get_length_const(self, length, constraints, name):\n # Check length and resolve wildcard\n lengths = [num for num, code in constraints]\n wilds = lengths.count(WILDCARD)\n assert wilds <= 1, \"Too many wildcards in sequence %s\" % name\n if wilds == 0: # no wildcards\n seq_lengths = sum(lengths)\n if length:\n assert length == seq_lengths, \"Length mismatch for sequence %s (%r != %r)\" % (name, length, seq_lengths)\n else: # If length was not specified (None), we set it\n length = seq_lengths\n else: # one wildcard\n if length == None: raise WildError(\"Sequence %s has a ?. but no length specified\" % name)\n check_length = sum([x for x in lengths if x != WILDCARD])\n wild_length = length - check_length # Wildcard is set so that total length is right\n assert wild_length >= 0, \"Sequence %s too short (%r > %r)\" % (name, length, check_length)\n i = lengths.index(WILDCARD)\n constraints[i] = (wild_length, constraints[i][1])\n \n const = \"\"\n for (num, base) in constraints:\n const += base * num # We represent constriants in long-form\n \n return length, const", "def length_name(self):\n return self._src_decoder.length_tensor_name", "def target_length_name(self):\n name = dsutils._connect_name(\n self._data_spec.name_prefix[1],\n self._tgt_decoder.length_tensor_name)\n return name", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def __len__(self):\n return len(self.constraints)", "def getbarvarname(self,i_):\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getbarvarname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getElementName(self):\n return _libsbml.ListOfConstraints_getElementName(self)", "def getconbound(self,i_): # 3\n res,resargs = self.__obj.getconbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def no_of_constraint_definitions(self):\n return len(self._constraints_df['name'].unique())", "def __len__(self):\n\t\treturn self.n", "def getconnameindex(self,somename_): # 3\n res,resargs = self.__obj.getconnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n 
_asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def getElementName(self):\n return _libsbml.Constraint_getElementName(self)", "def __len__(self):\n return self.n" ]
[ "0.8666037", "0.78833383", "0.745883", "0.7033554", "0.69469124", "0.6811607", "0.6778512", "0.6774401", "0.6771029", "0.6676001", "0.6658939", "0.6630528", "0.6565997", "0.65419215", "0.6404309", "0.63468707", "0.59039116", "0.564742", "0.56071514", "0.557387", "0.5483508", "0.54208976", "0.5398886", "0.5386571", "0.5283371", "0.5280204", "0.5211456", "0.5205067", "0.51862204", "0.5183962" ]
0.84394306
1
Obtains the name of a constraint. getconname(self,i_)
def getconname(self,i_):
    sizename_ = (1 + self.getconnamelen((i_)))
    name_ = (ctypes.c_char * (sizename_))()
    res = __library__.MSK_XX_getconname(self.__nativep,i_,sizename_,name_)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    _name_retval = name_.value.decode("utf-8",errors="replace")
    return (_name_retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconname(self,i_): # 3\n sizename_ = (1 + self.getconnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getconename(self,i_):\n sizename_ = (1 + self.getconenamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconename(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getElementName(self):\n return _libsbml.ListOfConstraints_getElementName(self)", "def getElementName(self):\n return _libsbml.Constraint_getElementName(self)", "def __str__(self):\n return \"Constraint(attribute_name={},constraint_type={})\".format(\n self.attribute_name, self.constraint_type\n )", "def putconname(self,i_,name_): # 3\n res = self.__obj.putconname(i_,name_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconbound(self,i_): # 3\n res,resargs = self.__obj.getconbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value", "def get_constraint(self, attribute_name):\n\n for constraint in self.constraints:\n if constraint.key == attribute_name:\n return constraint\n\n # If it can't be found, return None.\n return None", "def getconnameindex(self,somename_): # 3\n res,resargs = self.__obj.getconnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def varname_for_constraint(self, node):\n if isinstance(node, ast.Attribute):\n attribute_path = self._get_attribute_path(node)\n if attribute_path:\n attributes = tuple(attribute_path[1:])\n return CompositeVariable(attribute_path[0], attributes)\n else:\n return None\n elif isinstance(node, ast.Name):\n return node.id\n else:\n return None", "def residueName(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return 
freesasa_structure_atom_res_name(self._c_structure,i)", "def new_constraint_name(self, column, type):\n name = self.name.lstrip('migrate_')[:30]\n if type == 'UNIQUE':\n return '{}_{}_{}_uniq'.format(name, column[:15], self._random_string(8))\n elif type == 'PRIMARY KEY':\n return '{}_{}_pkey'.format(name, self._random_string(4))\n else:\n raise NotImplementedError('Name not implemented for type {}'.format(type))", "def putconname(self,i_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconname(self.__nativep,i_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _get_set(constraint):\n if \"_area\" in constraint:\n return \"loc_techs_area\"\n elif any(\n i in constraint for i in [\"resource_cap\", \"parasitic\", \"resource_min_use\"]\n ):\n return \"loc_techs_supply_plus\"\n elif (\n \"resource\" in constraint\n ): # i.e. everything with 'resource' in the name that isn't resource_cap\n return \"loc_techs_finite_resource\"\n elif (\n \"storage\" in constraint\n or \"charge_rate\" in constraint\n or \"energy_cap_per_storage_cap\" in constraint\n ):\n return \"loc_techs_store\"\n elif \"purchase\" in constraint:\n return \"loc_techs_purchase\"\n elif \"units_\" in constraint:\n return \"loc_techs_milp\"\n elif \"export\" in constraint:\n return \"loc_techs_export\"\n else:\n return \"loc_techs\"", "def getName(self):\n dataDict = self.__dict__\n result = self.varName\n if result is None:\n result = self.chemComp.name\n return result", "def equality_constraint_names(self):\n return []", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def get_constraints_of_type(self, constraint_type):\n if isinstance(constraint_type,str):\n constraint_key = constraint_type\n else:\n #it is a class\n constraint_key = constraint_type.__name__\n return self._cons_kinds[constraint_key]", "def printConstraint(self):\n # print out the names and the constraint\n print self.tail.name + \" \" + self.constraint + \" \" + self.head.name", "def cassette_name(self, request):\n marker = request.node.get_closest_marker(\"cassette_name\")\n if marker is None:\n return (\n f\"{request.cls.__name__}.{request.node.name}\"\n if request.cls\n else request.node.name\n )\n return marker.args[0]", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def get_discipline_name(self):\n return self.__discipline_name", "def name(self) -> 'Literal[\"Dynamic Reverse Address Resolution Protocol\", \"Inverse Address Resolution Protocol\", \"Reverse Address Resolution Protocol\", \"Address Resolution Protocol\"]': # pylint: disable=line-too-long\n return self._name", "def getConstraint(self, *args):\n return _libsbml.Model_getConstraint(self, *args)", "def getName(self):\n return _libsbml.Compartment_getName(self)", "def cname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname\")" ]
[ "0.7562321", "0.7176524", "0.7070297", "0.66708857", "0.6398901", "0.6233792", "0.61329055", "0.6105304", "0.59759116", "0.591931", "0.58823484", "0.58433723", "0.57034266", "0.55915326", "0.5590233", "0.5582501", "0.55802125", "0.5559249", "0.55544347", "0.55529326", "0.5523024", "0.55190504", "0.5502926", "0.5494534", "0.5486393", "0.546901", "0.5468706", "0.54606986", "0.54605246", "0.5442213" ]
0.73514813
1
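A similar hedged sketch for the constraint-name accessors in the two records above (getconnamelen and getconname), again with a purely illustrative task:

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendcons(1)               # one constraint row
    task.putconname(0, "budget")     # attach a name to constraint 0
    print(task.getconnamelen(0))     # length of the stored name
    print(task.getconname(0))        # prints "budget"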
Obtains the length of the name of a cone. getconenamelen(self,i_)
def getconenamelen(self,i_):
    len_ = ctypes.c_int32()
    res = __library__.MSK_XX_getconenamelen(self.__nativep,i_,ctypes.byref(len_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    len_ = len_.value
    _len_return_value = len_
    return (_len_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getconenamelen(self,i_): # 3\n res,resargs = self.__obj.getconenamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def namelength(self):\n return self[\"namelength\"]", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getconename(self,i_):\n sizename_ = (1 + self.getconenamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconename(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getconename(self,i_): # 3\n sizename_ = (1 + self.getconenamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconename(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconname(self,i_):\n sizename_ = (1 + self.getconnamelen((i_)))\n name_ = (ctypes.c_char * (sizename_))()\n res = __library__.MSK_XX_getconname(self.__nativep,i_,sizename_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _name_retval = name_.value.decode(\"utf-8\",errors=\"replace\")\n return (_name_retval)", "def getconname(self,i_): # 3\n sizename_ = (1 + self.getconnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getconname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n 
self.show_message_when_name_very_long()\n return length", "async def text_channel_name_length(\n channel: Channel\n):\n return len(channel.name)", "def getobjnamelen(self):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getobjnamelen(self.__nativep,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def length_name(self):\n return self._src_decoder.length_tensor_name", "def getobjnamelen(self): # 3\n res,resargs = self.__obj.getobjnamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def __len__(self) -> int:\n return self.length", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def __len__(self):\n\t\treturn self.n", "def aln_length(self) -> int:\n return len(self)", "def length(self):\n ...", "def __len__(self):\n return self._length # pylint: disable = E1101", "def tester(name):\n return len(name)", "def length(self):\n\t\treturn self.n", "def getLength(self):\n return self.n", "def __len__(self) -> int:\n return len(self.length)", "def __len__(self) -> int:\n return self._length", "def get_string_length(self):\n return int(self.read('H')[0])" ]
[ "0.87749386", "0.78596747", "0.76864165", "0.7421929", "0.7243701", "0.7168962", "0.7091672", "0.70586526", "0.6872572", "0.68703437", "0.68578297", "0.6839808", "0.6478438", "0.63468295", "0.62435097", "0.6238064", "0.61464494", "0.6096912", "0.60698086", "0.60670877", "0.60482603", "0.60301226", "0.60298944", "0.6023718", "0.6015218", "0.60032916", "0.5998028", "0.59854496", "0.59775776", "0.5972033" ]
0.82536674
1
Obtains the number of members in a cone. getnumconemem(self,k_)
def getnumconemem(self,k_):
    nummem_ = ctypes.c_int32()
    res = __library__.MSK_XX_getnumconemem(self.__nativep,k_,ctypes.byref(nummem_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    nummem_ = nummem_.value
    _nummem_return_value = nummem_
    return (_nummem_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getnumconemem(self,k_): # 3\n res,resargs = self.__obj.getnumconemem(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nummem_return_value = resargs\n return _nummem_return_value", "def getconeinfo(self,k_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconeinfo(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def count(r, c, k):\n\t\tif 0 <= r < M and 0 <= c < N:\n\t\t\tif A[r][c] == target[k]:\n\t\t\t\t\"*** YOUR CODE HERE ***\"\n\t\t\t\treturn memoized_count(r, c, k)\n\t\t\telse:\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0", "def getnumqconknz(self,k_): # 3\n res,resargs = self.__obj.getnumqconknz64(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n return _numqcnz_return_value", "def getconeinfo(self,k_): # 3\n res,resargs = self.__obj.getconeinfo(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def memlen(self):\n\n return len(self.memory)", "def getcone(self,k_,submem_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n _submem_minlength = self.getconeinfo((k_))[2]\n if self.getconeinfo((k_))[2] > 0 and submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem is not long enough: Is %d, expected %d\" % (len(submem_),self.getconeinfo((k_))[2]))\n if isinstance(submem_,numpy.ndarray) and not submem_.flags.writeable:\n raise ValueError(\"Argument submem must be writable\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_getcone(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_),_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n if _submem_copyarray:\n submem_[:] = _submem_np_tmp\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def getnumqconknz(self,k_):\n numqcnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumqconknz64(self.__nativep,k_,ctypes.byref(numqcnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numqcnz_ = numqcnz_.value\n _numqcnz_return_value = numqcnz_\n return 
(_numqcnz_return_value)", "def getnumcone(self):\n numcone_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumcone(self.__nativep,ctypes.byref(numcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numcone_ = numcone_.value\n _numcone_return_value = numcone_\n return (_numcone_return_value)", "def getnumcone(self): # 3\n res,resargs = self.__obj.getnumcone()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numcone_return_value = resargs\n return _numcone_return_value", "def __len__(self):\n return self.count_of(CUBA.NODE)", "def __len__(self):\n return self._n", "def count(self, conn, key):\n return conn.llen(key)", "def __len__(self):\n\t\treturn self.n", "def member_count(self):\n return len(self.members)", "def N ( self ) :\n return self.__N", "def nClumps(self):\n \n return len(self)", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def __len__(self):\n return self.n", "def num_conll(self):\n pass", "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def __len__(self):\r\n return self.n" ]
[ "0.87193775", "0.65474", "0.63329744", "0.6301218", "0.6256797", "0.60519123", "0.60468775", "0.6043543", "0.6008538", "0.6007757", "0.5989896", "0.5959924", "0.59045166", "0.58832043", "0.5873087", "0.5872675", "0.58704096", "0.58684206", "0.5850679", "0.5849518", "0.5849518", "0.5849518", "0.5849518", "0.5849518", "0.5849518", "0.5849518", "0.5849518", "0.5847403", "0.58470994", "0.58437955" ]
0.79310596
1
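A sketch for the cone records above (getconenamelen and getnumconemem), assuming the classical appendcone/putconename interface available in the MOSEK versions these stubs come from:

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendvars(3)
    # Quadratic cone over variables 0,1,2: x0 >= sqrt(x1^2 + x2^2)
    task.appendcone(mosek.conetype.quad, 0.0, [0, 1, 2])
    task.putconename(0, "qcone")
    print(task.getnumconemem(0))     # number of members in the cone: 3
    print(task.getconenamelen(0))    # length of the cone name
    print(task.getconename(0))       # prints "qcone"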
Obtains the number of parameters of a given type. getnumparam(self,partype_)
def getnumparam(self,partype_):
    numparam_ = ctypes.c_int32()
    res = __library__.MSK_XX_getnumparam(self.__nativep,partype_,ctypes.byref(numparam_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    numparam_ = numparam_.value
    _numparam_return_value = numparam_
    return (_numparam_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getnumparam(self,partype_): # 3\n if not isinstance(partype_,parametertype): raise TypeError(\"Argument partype has wrong type\")\n res,resargs = self.__obj.getnumparam(partype_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numparam_return_value = resargs\n return _numparam_return_value", "def num_params(self):", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def num_params(self) -> int:\n return self._num_params", "def num_param(self):\n return len(self._parameters)", "def n_parameters(self):\n return self.pdm.n_parameters", "def _n_parameters(self):\n raise NotImplementedError", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def get_num_parameters(self):\n return len(self.parameters)", "def get_params_count(self):\n\t\treturn call_sdk_function('PrlResult_GetParamsCount', self.handle)", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def N(self) -> int:\n return self.params.N", "def number_of_nodes(self, ntype: str = None) -> int:\n return self.num_nodes(ntype)", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def num_nodes(self, ntype: str = None) -> int:\n if ntype:\n return self.num_nodes_dict[ntype]\n else:\n return self.total_number_of_nodes", "def num_params(self):\n return len(self.params)", "def get_parameter_number(net):\n # print(type(net.parameters()))\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def batch_num_nodes(self, ntype=None):\n return self._batch_num_nodes[self.get_ntype_id(ntype)]", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def calc_block_num_params2(net):\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n return weight_count", "def num_params(architecture): #\n \n 
total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters" ]
[ "0.8639025", "0.68901056", "0.6834211", "0.6744843", "0.6720308", "0.65994984", "0.65801895", "0.64843345", "0.64329004", "0.63957036", "0.6382901", "0.637191", "0.6365708", "0.6353071", "0.62921935", "0.6271592", "0.6260164", "0.6259933", "0.62220746", "0.6217816", "0.6195614", "0.613535", "0.6114513", "0.6109386", "0.6103922", "0.6103922", "0.6103922", "0.6101598", "0.60911995", "0.6083041" ]
0.770266
1
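A short sketch for the getnumparam record above; the parametertype enum member names used below are assumptions about this API version.

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    # dou_type / int_type are the assumed enum member names for double- and
    # integer-valued parameters in this MOSEK release.
    print(task.getnumparam(mosek.parametertype.dou_type))
    print(task.getnumparam(mosek.parametertype.int_type))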
Obtains the number of nonzero quadratic terms in a constraint. getnumqconknz(self,k_)
def getnumqconknz(self,k_):
    numqcnz_ = ctypes.c_int64()
    res = __library__.MSK_XX_getnumqconknz64(self.__nativep,k_,ctypes.byref(numqcnz_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    numqcnz_ = numqcnz_.value
    _numqcnz_return_value = numqcnz_
    return (_numqcnz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getnumqconknz(self,k_): # 3\n res,resargs = self.__obj.getnumqconknz64(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n return _numqcnz_return_value", "def NumCoefficients(self):\n return nchoosek(self.degree + self.dimension, self.degree, exact=True)", "def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)", "def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def k1(self):\n if self._k1 is None:\n self._k1 = numpy.sum(self.zk[self.nk > 0])\n return self._k1", "def zernike_num_coeff(n):\n \n\tif not (n>=0):\n\t\tprint('Input parameter must be >= 0')\n\t\traise AssertionError() \n \n\treturn sum(xrange(n+1)) + n+1", "def count_NN(KL):\n zvals = (KL != 0).sum(1)\n return zvals", "def N_z(self) -> int:\n return self.params.N_z", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def getnumbarcnz(self): # 3\n res,resargs = self.__obj.getnumbarcnz()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nz_return_value = resargs\n return _nz_return_value", "def binomial_coefficient(n, k):\n if 0 <= k <= n:\n return reduce(lambda a, b: a * (n - b) / (b + 1), xrange(k), 1)\n else:\n return 0", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def getnumqobjnz(self): # 3\n res,resargs = self.__obj.getnumqobjnz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqonz_return_value = resargs\n return _numqonz_return_value", "def kaltzCentrality(graph, numberOfPoints):\n c_eigenvector = nx.katz_centrality(graph)\n c_eigenvector = heapq.nlargest(\n numberOfPoints, list(\n c_eigenvector.values()))\n return c_eigenvector", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def num_qubits(self) -> int:\n return self._circuit.num_qubits", "def num_qubits(self) -> int:\n raise NotImplementedError()", "def getnumbarcnz(self):\n nz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumbarcnz(self.__nativep,ctypes.byref(nz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nz_ = nz_.value\n _nz_return_value = nz_\n return (_nz_return_value)", "def nC(self):\n return int(self.vnC.prod())", "def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)", "def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count", "def n_choose_k(n: int, k: int) -> int:\n # Edge case, no possible way to choose.\n if k > n or k < 0 or n < 0: return 0\n # We choose the min of k or n - k\n # since nCk == nC(n - k).\n k = min(k, n - k)\n # The numerator 
represents the product\n # n * (n - 1) * (n - 2) * ... * (n - k - 1)\n numerator = reduce(mul, range(n, n - k, -1), 1)\n # The denominator represents the product\n # 1 * 2 * ... * k\n denominator = reduce(mul, range(1, k + 1), 1)\n # return the result as an integer.\n return numerator // denominator", "def getnumconemem(self,k_): # 3\n res,resargs = self.__obj.getnumconemem(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nummem_return_value = resargs\n return _nummem_return_value", "def n_qubits(self):\n return int(np.log2(len(self.mat)))", "def test_compar(K):\n K_int = int(np.ceil(K))\n n_k = len(bin(K_int))-1\n complement = np.binary_repr(-K_int, width=n_k)\n qr = QuantumRegister(5, 'q')\n qc = QuantumCircuit(qr)\n for i in range(3):\n qc.h(qr[i])\n qc.ccx(qr[0], qr[1], qr[3])\n for i in [2, 3, 4]:\n qc.x(qr[i])\n qc.ccx(qr[2], qr[3], qr[4])\n for i in [2, 3]:\n qc.x(qr[i])\n qc.ccx(qr[0], qr[1], qr[3])\n circ_m = measure(qc, qr, [i for i in range(5)])\n counts = launch(4000, circ_m)\n print(counts)\n print(complement)", "def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)", "def nC(self):\n return int(self._n.prod())" ]
[ "0.81055516", "0.69355136", "0.6510874", "0.6368569", "0.6130344", "0.6097802", "0.6064723", "0.59739923", "0.59143233", "0.5902763", "0.58940965", "0.587751", "0.5876627", "0.5862211", "0.58392245", "0.5836491", "0.57720524", "0.57618713", "0.573745", "0.57012874", "0.56852096", "0.568404", "0.5681849", "0.5656043", "0.56341916", "0.56332535", "0.562342", "0.55938166", "0.55922633", "0.55793685" ]
0.7470945
1
Obtains the number of nonzero quadratic terms in the objective. getnumqobjnz(self)
def getnumqobjnz(self):
    numqonz_ = ctypes.c_int64()
    res = __library__.MSK_XX_getnumqobjnz64(self.__nativep,ctypes.byref(numqonz_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    numqonz_ = numqonz_.value
    _numqonz_return_value = numqonz_
    return (_numqonz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getnumqobjnz(self): # 3\n res,resargs = self.__obj.getnumqobjnz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqonz_return_value = resargs\n return _numqonz_return_value", "def nnz(self):\n return self.rep.nnz()", "def nnz(self):\n return self.to_ddm().nnz()", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def nnz(self):\n return len(self.value)", "def nnz(self):", "def nnz(self):\n return len(self.data)", "def getnumbarcnz(self): # 3\n res,resargs = self.__obj.getnumbarcnz()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nz_return_value = resargs\n return _nz_return_value", "def nnz(self):\n\t\treturn self.st.size()", "def N_z(self) -> int:\n return self.params.N_z", "def NNZ(self):\n return len(self.__IndList)", "def NNZ(self):\n return _hypre.HypreParMatrix_NNZ(self)", "def getmaxnumqnz(self): # 3\n res,resargs = self.__obj.getmaxnumqnz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumqnz_return_value = resargs\n return _maxnumqnz_return_value", "def getnumqconknz(self,k_): # 3\n res,resargs = self.__obj.getnumqconknz64(k_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n return _numqcnz_return_value", "def num_qubits(self) -> int:\n raise NotImplementedError()", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def getNZ(self):\n return self._get_nz( )", "def stats(self):\n nqbits = self.operator.num_qubits", "def nnz(self):\n t = self.get_MSC()\n return len(np.unique(t['masks']))", "def n_qubits(self):\n return int(np.log2(len(self.mat)))", "def NumCoefficients(self):\n return nchoosek(self.degree + self.dimension, self.degree, exact=True)", "def num_qubits(self) -> int:\n return super().num_qubits", "def num_qubits(self) -> int:\n return self._circuit.num_qubits", "def nz(self):\n return self._dim[2]", "def get_Q_size(self):\n return len(self.qTable)", "def getnumbarcnz(self):\n nz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumbarcnz(self.__nativep,ctypes.byref(nz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nz_ = nz_.value\n _nz_return_value = nz_\n return (_nz_return_value)", "def get_active_coeff(self):\r\n num_active_coeff = 0\r\n for coefficient in self.model_.coef_:\r\n if abs(coefficient) > 0:\r\n num_active_coeff += 1\r\n return num_active_coeff", "def n(self):\n if not self.table:\n return 0\n return max(self.omega) + 1", "def nquads(self):\n return len(self.tree)", "def get_local_nnz(self):\n return _hypre.HypreParMatrix_get_local_nnz(self)" ]
[ "0.82374734", "0.6691824", "0.6654184", "0.6631789", "0.6566139", "0.6549166", "0.6526288", "0.6477858", "0.64586747", "0.6448433", "0.6430575", "0.631171", "0.6284648", "0.6211166", "0.6139018", "0.6122343", "0.61054796", "0.6086818", "0.6061201", "0.59945405", "0.59900707", "0.59065473", "0.5896684", "0.5762807", "0.5740436", "0.5732201", "0.5716604", "0.5698827", "0.56684995", "0.56630826" ]
0.72892433
1
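A sketch covering the two quadratic-term counters above (getnumqconknz and getnumqobjnz), assuming MOSEK's usual lower-triangular 1/2 x'Qx input convention for putqobj and putqconk:

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendvars(2)
    task.appendcons(1)
    # Lower-triangular quadratic objective terms: x0^2 + x0*x1 + x1^2
    task.putqobj([0, 1, 1], [0, 0, 1], [2.0, 1.0, 2.0])
    # Two quadratic terms in constraint 0
    task.putqconk(0, [0, 1], [0, 1], [1.0, 1.0])
    print(task.getnumqobjnz())       # 3 nonzero objective terms
    print(task.getnumqconknz(0))     # 2 nonzero terms in constraint 0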
Obtains the maximum number of symmetric matrix variables for which space is currently preallocated. getmaxnumbarvar(self)
def getmaxnumbarvar(self):
    maxnumbarvar_ = ctypes.c_int32()
    res = __library__.MSK_XX_getmaxnumbarvar(self.__nativep,ctypes.byref(maxnumbarvar_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    maxnumbarvar_ = maxnumbarvar_.value
    _maxnumbarvar_return_value = maxnumbarvar_
    return (_maxnumbarvar_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getmaxnumbarvar(self): # 3\n res,resargs = self.__obj.getmaxnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumbarvar_return_value = resargs\n return _maxnumbarvar_return_value", "def getmaxnumvar(self): # 3\n res,resargs = self.__obj.getmaxnumvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumvar_return_value = resargs\n return _maxnumvar_return_value", "def putmaxnumbarvar(self,maxnumbarvar_): # 3\n res = self.__obj.putmaxnumbarvar(maxnumbarvar_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getmaxnumvar(self):\n maxnumvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getmaxnumvar(self.__nativep,ctypes.byref(maxnumvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumvar_ = maxnumvar_.value\n _maxnumvar_return_value = maxnumvar_\n return (_maxnumvar_return_value)", "def getnumbarvar(self): # 3\n res,resargs = self.__obj.getnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numbarvar_return_value = resargs\n return _numbarvar_return_value", "def putmaxnumbarvar(self,maxnumbarvar_):\n res = __library__.MSK_XX_putmaxnumbarvar(self.__nativep,maxnumbarvar_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def maxdim(self):\n return self._maxdim", "def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)", "def getmaxnumqnz(self): # 3\n res,resargs = self.__obj.getmaxnumqnz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumqnz_return_value = resargs\n return _maxnumqnz_return_value", "def getnumbarvar(self):\n numbarvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumbarvar(self.__nativep,ctypes.byref(numbarvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numbarvar_ = numbarvar_.value\n _numbarvar_return_value = numbarvar_\n return (_numbarvar_return_value)", "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "def getmaxnumcon(self): # 3\n res,resargs = self.__obj.getmaxnumcon()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumcon_return_value = resargs\n return _maxnumcon_return_value", "def max_memory_gib(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_memory_gib\")", "def nvar(self):\n return self.h.shape[0]", "def putmaxnumvar(self,maxnumvar_): # 3\n res = self.__obj.putmaxnumvar(maxnumvar_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getmaxnumqnz(self):\n maxnumqnz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getmaxnumqnz64(self.__nativep,ctypes.byref(maxnumqnz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumqnz_ = maxnumqnz_.value\n _maxnumqnz_return_value = maxnumqnz_\n return (_maxnumqnz_return_value)", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def nvar(self):\n return len(self.__vars)", "def get_v_max(self) -> int:\n return len(self.vocabulary)", "def cargo_max(self) -> Union[float, int]:\n return self.proto.cargo_space_max", "def getnumvar(self): # 3\n res,resargs = self.__obj.getnumvar()\n if res != 0:\n result,msg = 
self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numvar_return_value = resargs\n return _numvar_return_value", "def max_num_neighbors(self):\n return self._max_num_neighbors", "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "def get_max_rois(self):\n \n maxsize = 0\n for index in self.SampleID:\n rois = self.__getrois__(index);\n maxsize = max(maxsize, rois.shape[0])\n \n return maxsize", "def max_in_gbps(self):\n return self._max_in_gbps", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def get_state_observed_max(self):\n maxValues = numpy.zeros(self.get_num_variables())\n i = 0\n for v in self.variables:\n maxValues[i] = v.get_max_value()\n i += 1\n return maxValues", "def _get_maxth(self):\n return self.__maxth" ]
[ "0.8353058", "0.7625918", "0.7178526", "0.7173014", "0.6900788", "0.67549825", "0.67534035", "0.6604703", "0.65953386", "0.65564364", "0.6491961", "0.6491961", "0.64415246", "0.6377342", "0.6367016", "0.6346783", "0.63250744", "0.63224643", "0.6280085", "0.623158", "0.6207333", "0.6171416", "0.6159507", "0.61587626", "0.6158434", "0.61514854", "0.6138942", "0.6122698", "0.61125124", "0.6101226" ]
0.7973662
1
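A sketch for the symmetric-matrix-variable accessors (getmaxnumbarvar above and getdimbarvarj in the next record), assuming the standard appendbarvars call:

import mosek

with mosek.Env() as env, env.Task(0, 0) as task:
    task.appendbarvars([3, 4])       # two PSD matrix variables: 3x3 and 4x4
    print(task.getnumbarvar())       # 2
    print(task.getdimbarvarj(1))     # dimension of the second barvar: 4
    print(task.getmaxnumbarvar())    # currently preallocated capacity (at least 2)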
Obtains the dimension of a symmetric matrix variable. getdimbarvarj(self,j_)
def getdimbarvarj(self,j_):
    dimbarvarj_ = ctypes.c_int32()
    res = __library__.MSK_XX_getdimbarvarj(self.__nativep,j_,ctypes.byref(dimbarvarj_))
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    dimbarvarj_ = dimbarvarj_.value
    _dimbarvarj_return_value = dimbarvarj_
    return (_dimbarvarj_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdimbarvarj(self,j_): # 3\n res,resargs = self.__obj.getdimbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dimbarvarj_return_value = resargs\n return _dimbarvarj_return_value", "def getlenbarvarj(self,j_): # 3\n res,resargs = self.__obj.getlenbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _lenbarvarj_return_value = resargs\n return _lenbarvarj_return_value", "def getlenbarvarj(self,j_):\n lenbarvarj_ = ctypes.c_int64()\n res = __library__.MSK_XX_getlenbarvarj(self.__nativep,j_,ctypes.byref(lenbarvarj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n lenbarvarj_ = lenbarvarj_.value\n _lenbarvarj_return_value = lenbarvarj_\n return (_lenbarvarj_return_value)", "def nvar(self):\n return self.h.shape[0]", "def getbarcsparsity(self,idxj_):\n maxnumnz_ = self.getnumbarcnz()\n numnz_ = ctypes.c_int64()\n _idxj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and idxj_ is not None and len(idxj_) != (maxnumnz_):\n raise ValueError(\"Array argument idxj is not long enough: Is %d, expected %d\" % (len(idxj_),(maxnumnz_)))\n if isinstance(idxj_,numpy.ndarray) and not idxj_.flags.writeable:\n raise ValueError(\"Argument idxj must be writable\")\n if idxj_ is None:\n raise ValueError(\"Argument idxj may not be None\")\n if isinstance(idxj_, numpy.ndarray) and idxj_.dtype is numpy.dtype(numpy.int64) and idxj_.flags.contiguous:\n _idxj_copyarray = False\n _idxj_tmp = ctypes.cast(idxj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif idxj_ is not None:\n _idxj_copyarray = True\n _idxj_np_tmp = numpy.zeros(len(idxj_),numpy.dtype(numpy.int64))\n _idxj_np_tmp[:] = idxj_\n assert _idxj_np_tmp.flags.contiguous\n _idxj_tmp = ctypes.cast(_idxj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _idxj_copyarray = False\n _idxj_tmp = None\n \n res = __library__.MSK_XX_getbarcsparsity(self.__nativep,maxnumnz_,ctypes.byref(numnz_),_idxj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n if _idxj_copyarray:\n idxj_[:] = _idxj_np_tmp\n return (_numnz_return_value)", "def dim(self):\n\t\treturn self.D", "def getbarcsparsity(self,idxj): # 3\n maxnumnz_ = self.getnumbarcnz()\n if idxj is None: raise TypeError(\"Invalid type for argument idxj\")\n _copyback_idxj = False\n if idxj is None:\n idxj_ = None\n else:\n try:\n idxj_ = memoryview(idxj)\n except TypeError:\n try:\n _tmparr_idxj = array.array(\"q\",idxj)\n except TypeError:\n raise TypeError(\"Argument idxj has wrong type\")\n else:\n idxj_ = memoryview(_tmparr_idxj)\n _copyback_idxj = True\n else:\n if idxj_.format != \"q\":\n idxj_ = memoryview(array.array(\"q\",idxj))\n _copyback_idxj = True\n if idxj_ is not None and len(idxj_) != (maxnumnz_):\n raise ValueError(\"Array argument idxj has wrong length\")\n res,resargs = self.__obj.getbarcsparsity(maxnumnz_,idxj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n if _copyback_idxj:\n idxj[:] = _tmparr_idxj\n return _numnz_return_value", "def dim(self):\n return self._dim", "def dim(self):\n return self._d", "def dim(self):\n return self.__dim__", "def dim(self):\n if '_dim' in self.__dict__:\n return self._dim\n\n if len(self._Vrepresentation)==0:\n self._dim = -1\n return self._dim\n\n origin = vector(self._Vrepresentation[0])\n v_list = [ vector(v)-origin for v in 
self._Vrepresentation ]\n self._dim = matrix(v_list).rank()\n return self._dim", "def get_dim(self):\n return self.dim", "def get_dim(self):\n return self._dim", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def getbarxj(self,whichsol_,j_,barxj_):\n _barxj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barxj_ is not None and len(barxj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barxj is not long enough: Is %d, expected %d\" % (len(barxj_),self.getlenbarvarj((j_))))\n if isinstance(barxj_,numpy.ndarray) and not barxj_.flags.writeable:\n raise ValueError(\"Argument barxj must be writable\")\n if barxj_ is None:\n raise ValueError(\"Argument barxj may not be None\")\n if isinstance(barxj_, numpy.ndarray) and barxj_.dtype is numpy.dtype(numpy.float64) and barxj_.flags.contiguous:\n _barxj_copyarray = False\n _barxj_tmp = ctypes.cast(barxj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barxj_ is not None:\n _barxj_copyarray = True\n _barxj_np_tmp = numpy.zeros(len(barxj_),numpy.dtype(numpy.float64))\n _barxj_np_tmp[:] = barxj_\n assert _barxj_np_tmp.flags.contiguous\n _barxj_tmp = ctypes.cast(_barxj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barxj_copyarray = False\n _barxj_tmp = None\n \n res = __library__.MSK_XX_getbarxj(self.__nativep,whichsol_,j_,_barxj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _barxj_copyarray:\n barxj_[:] = _barxj_np_tmp", "def dimension(self):\n return self._dim", "def dim(self):\n return self.m, self.n", "def getbarsj(self,whichsol_,j_,barsj_):\n _barsj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barsj_ is not None and len(barsj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barsj is not long enough: Is %d, expected %d\" % (len(barsj_),self.getlenbarvarj((j_))))\n if isinstance(barsj_,numpy.ndarray) and not barsj_.flags.writeable:\n raise ValueError(\"Argument barsj must be writable\")\n if barsj_ is None:\n raise ValueError(\"Argument barsj may not be None\")\n if isinstance(barsj_, numpy.ndarray) and barsj_.dtype is numpy.dtype(numpy.float64) and barsj_.flags.contiguous:\n _barsj_copyarray = False\n _barsj_tmp = ctypes.cast(barsj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barsj_ is not None:\n _barsj_copyarray = True\n _barsj_np_tmp = numpy.zeros(len(barsj_),numpy.dtype(numpy.float64))\n _barsj_np_tmp[:] = barsj_\n assert _barsj_np_tmp.flags.contiguous\n _barsj_tmp = ctypes.cast(_barsj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barsj_copyarray = False\n _barsj_tmp = None\n \n res = __library__.MSK_XX_getbarsj(self.__nativep,whichsol_,j_,_barsj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _barsj_copyarray:\n barsj_[:] = _barsj_np_tmp", "def n_dim(self):\n return self._n_dim", "def dim(self) -> int:", "def dimension(self):\n\t\treturn self.d", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def dimension(self):\n return self.__N", "def ndim(self):\n return np.ndim(self.MJD)", "def dim(self):\n if not self.is_indexed():\n return 0\n return 
getattr(self._index, 'dimen', 0)", "def dimensionality(self):\n return int(self.nDims)", "def ndim(self):\n return len(self.nvars)", "def dim(self):\n return self._domain.dim", "def dim(self) -> int:\n return self._n_dim" ]
[ "0.8157939", "0.7234874", "0.7118234", "0.66493785", "0.62534416", "0.6217813", "0.61596256", "0.6108693", "0.610009", "0.6096139", "0.6055242", "0.60185647", "0.59672534", "0.59501725", "0.59287465", "0.58893055", "0.5870976", "0.58589756", "0.58523554", "0.58254814", "0.5825349", "0.5799651", "0.57861614", "0.5760376", "0.57570297", "0.5741554", "0.573739", "0.57312244", "0.570173", "0.56973195" ]
0.7893175
1
Obtains the length of one semidefinite variable. getlenbarvarj(self,j_)
def getlenbarvarj(self,j_): lenbarvarj_ = ctypes.c_int64() res = __library__.MSK_XX_getlenbarvarj(self.__nativep,j_,ctypes.byref(lenbarvarj_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) lenbarvarj_ = lenbarvarj_.value _lenbarvarj_return_value = lenbarvarj_ return (_lenbarvarj_return_value)
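A hedged sketch relating getlenbarvarj to getdimbarvarj for the record above; it assumes the mosek Python package and that the returned length is the packed lower-triangle size dim*(dim+1)/2, which is what the surrounding snippets suggest:

    import mosek

    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.appendbarvars([4])          # one 4x4 semidefinite variable (illustrative size)
            dim = task.getdimbarvarj(0)      # 4
            length = task.getlenbarvarj(0)   # number of scalar elements stored for this variable
            assert length == dim * (dim + 1) // 2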
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getlenbarvarj(self,j_): # 3\n res,resargs = self.__obj.getlenbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _lenbarvarj_return_value = resargs\n return _lenbarvarj_return_value", "def getdimbarvarj(self,j_): # 3\n res,resargs = self.__obj.getdimbarvarj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dimbarvarj_return_value = resargs\n return _dimbarvarj_return_value", "def getdimbarvarj(self,j_):\n dimbarvarj_ = ctypes.c_int32()\n res = __library__.MSK_XX_getdimbarvarj(self.__nativep,j_,ctypes.byref(dimbarvarj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dimbarvarj_ = dimbarvarj_.value\n _dimbarvarj_return_value = dimbarvarj_\n return (_dimbarvarj_return_value)", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def nvar(self):\n return self.h.shape[0]", "def nvar(self):\n return len(self.v)", "def nvar(self):\n return len(self.__vars)", "def __len__(self):\n return len(self._varvals)", "def getbarsj(self,whichsol_,j_,barsj_):\n _barsj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barsj_ is not None and len(barsj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barsj is not long enough: Is %d, expected %d\" % (len(barsj_),self.getlenbarvarj((j_))))\n if isinstance(barsj_,numpy.ndarray) and not barsj_.flags.writeable:\n raise ValueError(\"Argument barsj must be writable\")\n if barsj_ is None:\n raise ValueError(\"Argument barsj may not be None\")\n if isinstance(barsj_, numpy.ndarray) and barsj_.dtype is numpy.dtype(numpy.float64) and barsj_.flags.contiguous:\n _barsj_copyarray = False\n _barsj_tmp = ctypes.cast(barsj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barsj_ is not None:\n _barsj_copyarray = True\n _barsj_np_tmp = numpy.zeros(len(barsj_),numpy.dtype(numpy.float64))\n _barsj_np_tmp[:] = barsj_\n assert _barsj_np_tmp.flags.contiguous\n _barsj_tmp = ctypes.cast(_barsj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barsj_copyarray = False\n _barsj_tmp = None\n \n res = __library__.MSK_XX_getbarsj(self.__nativep,whichsol_,j_,_barsj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _barsj_copyarray:\n barsj_[:] = _barsj_np_tmp", "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getbarxj(self,whichsol_,j_,barxj_):\n _barxj_minlength = self.getlenbarvarj((j_))\n if self.getlenbarvarj((j_)) > 0 and barxj_ is not None and len(barxj_) != self.getlenbarvarj((j_)):\n raise ValueError(\"Array argument barxj is not long enough: Is %d, expected %d\" % (len(barxj_),self.getlenbarvarj((j_))))\n if isinstance(barxj_,numpy.ndarray) and not barxj_.flags.writeable:\n raise ValueError(\"Argument barxj must be writable\")\n if barxj_ is None:\n raise ValueError(\"Argument barxj 
may not be None\")\n if isinstance(barxj_, numpy.ndarray) and barxj_.dtype is numpy.dtype(numpy.float64) and barxj_.flags.contiguous:\n _barxj_copyarray = False\n _barxj_tmp = ctypes.cast(barxj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif barxj_ is not None:\n _barxj_copyarray = True\n _barxj_np_tmp = numpy.zeros(len(barxj_),numpy.dtype(numpy.float64))\n _barxj_np_tmp[:] = barxj_\n assert _barxj_np_tmp.flags.contiguous\n _barxj_tmp = ctypes.cast(_barxj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _barxj_copyarray = False\n _barxj_tmp = None\n \n res = __library__.MSK_XX_getbarxj(self.__nativep,whichsol_,j_,_barxj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _barxj_copyarray:\n barxj_[:] = _barxj_np_tmp", "def getvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def get_height(self, vars: np.ndarray, i: int, j: int) -> float:\n\n if 1 <= i <= self.n - 2 and 1 <= j <= self.n - 2:\n return vars[(self.n - 2) * (i - 1) + (j - 1)]\n else:\n return self.r(i/(self.n-1), j/(self.n-1))", "def __len__(self) -> int:\n return len(self.variables)", "def leg_length(self, *args):\n i, j = args\n return Partition(list(self)).leg_length(i-1, j-1)", "def upper_hook_length(self, i, j, parameter):\n leg = self.circle_star().leg_length(i, j)\n arm = self.star().arm_length(i, j)\n return leg + parameter*(arm + 1)", "def getbarcsparsity(self,idxj_):\n maxnumnz_ = self.getnumbarcnz()\n numnz_ = ctypes.c_int64()\n _idxj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and idxj_ is not None and len(idxj_) != (maxnumnz_):\n raise ValueError(\"Array argument idxj is not long enough: Is %d, expected %d\" % (len(idxj_),(maxnumnz_)))\n if isinstance(idxj_,numpy.ndarray) and not idxj_.flags.writeable:\n raise ValueError(\"Argument idxj must be writable\")\n if idxj_ is None:\n raise ValueError(\"Argument idxj may not be None\")\n if isinstance(idxj_, numpy.ndarray) and idxj_.dtype is numpy.dtype(numpy.int64) and idxj_.flags.contiguous:\n _idxj_copyarray = False\n _idxj_tmp = ctypes.cast(idxj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif idxj_ is not None:\n _idxj_copyarray = True\n _idxj_np_tmp = numpy.zeros(len(idxj_),numpy.dtype(numpy.int64))\n _idxj_np_tmp[:] = idxj_\n assert _idxj_np_tmp.flags.contiguous\n _idxj_tmp = ctypes.cast(_idxj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _idxj_copyarray = False\n _idxj_tmp = None\n \n res = __library__.MSK_XX_getbarcsparsity(self.__nativep,maxnumnz_,ctypes.byref(numnz_),_idxj_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numnz_ = numnz_.value\n _numnz_return_value = numnz_\n if _idxj_copyarray:\n idxj_[:] = _idxj_np_tmp\n return (_numnz_return_value)", "def getvartype(self,j_): # 3\n res,resargs = self.__obj.getvartype(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _vartype_return_value = resargs\n _vartype_return_value = variabletype(_vartype_return_value)\n return _vartype_return_value", "def num_vars(self):\n return len(self.bounds.lb)", "def calculate_dvec_spin_fixed_j(self, jorb: int) -> 'Nparray':\n return self._calculate_dvec_spin_with_coeff_fixed_j(self.coeff, jorb)", "def getnumbarvar(self): # 3\n res,resargs = 
self.__obj.getnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numbarvar_return_value = resargs\n return _numbarvar_return_value", "def __len__(self):\n\n value_length = []\n for v in chain(self.values(), self.metainfo_values()):\n if isinstance(v, LabelData):\n value_length.append(v.label.shape[0])\n elif is_splitable_var(v):\n value_length.append(len(v))\n else:\n continue\n\n # NOTE: If length of values are not same or the current data sample\n # is empty, return length as 1\n if len(list(set(value_length))) != 1:\n return 1\n\n length = value_length[0]\n return length", "def length(self):\n\t\treturn self.n", "def getvartype(self,j_):\n vartype_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvartype(self.__nativep,j_,ctypes.byref(vartype_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _vartype_return_value = variabletype(vartype_.value)\n return (_vartype_return_value)", "def getbarcsparsity(self,idxj): # 3\n maxnumnz_ = self.getnumbarcnz()\n if idxj is None: raise TypeError(\"Invalid type for argument idxj\")\n _copyback_idxj = False\n if idxj is None:\n idxj_ = None\n else:\n try:\n idxj_ = memoryview(idxj)\n except TypeError:\n try:\n _tmparr_idxj = array.array(\"q\",idxj)\n except TypeError:\n raise TypeError(\"Argument idxj has wrong type\")\n else:\n idxj_ = memoryview(_tmparr_idxj)\n _copyback_idxj = True\n else:\n if idxj_.format != \"q\":\n idxj_ = memoryview(array.array(\"q\",idxj))\n _copyback_idxj = True\n if idxj_ is not None and len(idxj_) != (maxnumnz_):\n raise ValueError(\"Array argument idxj has wrong length\")\n res,resargs = self.__obj.getbarcsparsity(maxnumnz_,idxj_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numnz_return_value = resargs\n if _copyback_idxj:\n idxj[:] = _tmparr_idxj\n return _numnz_return_value", "def calculate_dvec_spatial_fixed_j(self, jorb: int) -> 'Nparray':\n return self._calculate_dvec_spatial_with_coeff_fixed_j(self.coeff, jorb)", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def ndim(self):\n return len(self.nvars)", "def jval(self):\n return self.q * self.model.nobs_moms" ]
[ "0.88877654", "0.75584835", "0.7211314", "0.6690643", "0.66048795", "0.6505069", "0.6360071", "0.6273677", "0.61290354", "0.61180645", "0.60778457", "0.604806", "0.5983262", "0.5907069", "0.583178", "0.5763323", "0.57298106", "0.5695627", "0.56907165", "0.5678655", "0.5666649", "0.56217366", "0.5590218", "0.5586431", "0.55834633", "0.5582958", "0.55736315", "0.5570804", "0.55547196", "0.55504227" ]
0.8653341
1
Obtains the length of the name assigned to the objective function. getobjnamelen(self)
def getobjnamelen(self): len_ = ctypes.c_int32() res = __library__.MSK_XX_getobjnamelen(self.__nativep,ctypes.byref(len_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) len_ = len_.value _len_return_value = len_ return (_len_return_value)
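An illustrative sketch for the objective-name calls documented above, assuming the mosek Python package; the name total_cost is an arbitrary placeholder, and the length is taken to exclude the terminating character, as the 1 + getobjnamelen() buffer sizing in the wrapper suggests:

    import mosek

    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.putobjname("total_cost")   # assign a name to the objective function
            n = task.getobjnamelen()        # length of that name
            name = task.getobjname()
            print(n, name)                  # expected: 10 total_cost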
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getobjnamelen(self): # 3\n res,resargs = self.__obj.getobjnamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getobjname(self):\n sizeobjname_ = (1 + self.getobjnamelen())\n objname_ = (ctypes.c_char * (sizeobjname_))()\n res = __library__.MSK_XX_getobjname(self.__nativep,sizeobjname_,objname_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _objname_retval = objname_.value.decode(\"utf-8\",errors=\"replace\")\n return (_objname_retval)", "def getconnamelen(self,i_): # 3\n res,resargs = self.__obj.getconnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def namelength(self):\n return self[\"namelength\"]", "def gettasknamelen(self): # 3\n res,resargs = self.__obj.gettasknamelen()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getobjname(self): # 3\n sizeobjname_ = (1 + self.getobjnamelen())\n arr_objname = array.array(\"b\",[0]*((sizeobjname_)))\n memview_arr_objname = memoryview(arr_objname)\n res,resargs = self.__obj.getobjname(sizeobjname_,memview_arr_objname)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_objname = resargs\n retarg_objname = arr_objname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_objname", "def gettasknamelen(self):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_gettasknamelen(self.__nativep,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getconenamelen(self,i_): # 3\n res,resargs = self.__obj.getconenamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def getvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def getbarvarnamelen(self,i_): # 3\n res,resargs = self.__obj.getbarvarnamelen(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value = resargs\n return _len_return_value", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def length_name(self):\n return self._src_decoder.length_tensor_name", "def getName(obj):", "def getbarvarnamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n 
return (_len_return_value)", "def getconenamelen(self,i_):\n len_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconenamelen(self.__nativep,i_,ctypes.byref(len_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n return (_len_return_value)", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def _get_object_name(self) :\n\t\ttry :\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def __len__(self):\n\t\treturn self.n", "def getName(self):\n return _libsbml.FluxObjective_getName(self)", "def getName(self):\n return _libsbml.Objective_getName(self)", "def name(self) -> str:\n return f\"{self._obj_name} count\"", "def target_length_name(self):\n name = dsutils._connect_name(\n self._data_spec.name_prefix[1],\n self._tgt_decoder.length_tensor_name)\n return name", "def __len__(self):\n return self.__length" ]
[ "0.8600193", "0.68811417", "0.68546593", "0.68029124", "0.6799229", "0.6797271", "0.67207515", "0.6687548", "0.66241795", "0.66220766", "0.65274286", "0.6385338", "0.6192809", "0.6146675", "0.60793424", "0.6027188", "0.602287", "0.59914076", "0.59914076", "0.59914076", "0.59914076", "0.59914076", "0.59914076", "0.59361726", "0.5920228", "0.58926666", "0.58716476", "0.5851185", "0.5832412", "0.58206725" ]
0.8552058
1
Computes the primal objective value for the desired solution. getprimalobj(self,whichsol_)
def getprimalobj(self,whichsol_): primalobj_ = ctypes.c_double() res = __library__.MSK_XX_getprimalobj(self.__nativep,whichsol_,ctypes.byref(primalobj_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) primalobj_ = primalobj_.value _primalobj_return_value = primalobj_ return (_primalobj_return_value)
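A small end-to-end sketch for getprimalobj from the record above, assuming the mosek Python package and a trivial LP chosen only for illustration (minimize x subject to x >= 1):

    import mosek

    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.appendvars(1)
            task.putcj(0, 1.0)
            task.putvarbound(0, mosek.boundkey.lo, 1.0, +1.0e30)
            task.putobjsense(mosek.objsense.minimize)
            task.optimize()
            # Primal objective value of the interior-point solution.
            print(task.getprimalobj(mosek.soltype.itr))  # expected: about 1.0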
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getprimalobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprimalobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _primalobj_return_value = resargs\n return _primalobj_return_value", "def objective(self) -> Optional[Union[int, float]]:\n if self.solution is not None:\n if isinstance(self.solution, list):\n return getattr(self.solution[-1], \"objective\", None)\n else:\n return getattr(self.solution, \"objective\", None)\n else:\n return None", "def getprosta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprosta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = resargs\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value", "def getdualobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getdualobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dualobj_return_value = resargs\n return _dualobj_return_value", "def get_objective(self, X_v, U_v, X_last_p, U_last_p):\n objective = None\n return objective", "def objective(self):\n return self._objective", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = __library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n _dviolcone_return_value = dviolcone_\n return (_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise 
Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def getprimalsolutionnorms(self,whichsol_):\n nrmxc_ = ctypes.c_double()\n nrmxx_ = ctypes.c_double()\n nrmbarx_ = ctypes.c_double()\n res = __library__.MSK_XX_getprimalsolutionnorms(self.__nativep,whichsol_,ctypes.byref(nrmxc_),ctypes.byref(nrmxx_),ctypes.byref(nrmbarx_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nrmxc_ = nrmxc_.value\n _nrmxc_return_value = nrmxc_\n nrmxx_ = nrmxx_.value\n _nrmxx_return_value = nrmxx_\n nrmbarx_ = nrmbarx_.value\n _nrmbarx_return_value = nrmbarx_\n return (_nrmxc_return_value,_nrmxx_return_value,_nrmbarx_return_value)", "def objective_val(self):\n return self.m.ObjVal", "def getprimalsolutionnorms(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprimalsolutionnorms(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nrmxc_return_value,_nrmxx_return_value,_nrmbarx_return_value = resargs\n return _nrmxc_return_value,_nrmxx_return_value,_nrmbarx_return_value", "def objective(self):\n pass", "def getObjective(self, *args):\n return _libsbml.FbcModelPlugin_getObjective(self, *args)", "def get_solution(self):\r\n return self.solution", "def _getPrimalSolution(u_hat_mean, Amatrix, hypothesisCosts):\n\n # find partial primal solution without conflicts\n idx_selectedHyps = u_hat_mean == 1\n\n idx_unselectedHyps = np.logical_not(idx_selectedHyps)\n\n # Tracks and measurements not used by the partial solution (ordered by\n # tracks first, then measurements for each scan)\n idx_uncertainTracksMeas = np.sum(Amatrix[:, idx_selectedHyps], axis=1).astype(np.int32) == 0\n\n # If a track or measurement used by the partial solution, remove it from\n # the problem to be solved by integer linear programming\n for i, val in enumerate(idx_uncertainTracksMeas):\n if not val:\n idx_unselectedHyps[Amatrix[i, :] == 1] = False\n\n # Solve remaining problem using OR tools solver to find a feasible solution\n A_uncertain = Amatrix[:, idx_unselectedHyps][idx_uncertainTracksMeas, :]\n c_uncertain = hypothesisCosts[idx_unselectedHyps] * 1000000\n\n # Create the mip solver with the SCIP backend.\n solver = pywraplp.Solver.CreateSolver(\"SCIP\")\n\n # Add constraints\n vars = [solver.BoolVar(str(i)) for i in range(c_uncertain.size)]\n for A_uncertain_row in A_uncertain:\n selected_vars = [var for var, A_val in zip(vars, A_uncertain_row) if A_val]\n solver.Add(solver.Sum(selected_vars) == 1)\n\n # Run the solver\n solver.Minimize(solver.Sum([c * var for var, c in zip(vars, c_uncertain)]))\n status = solver.Solve()\n if status not in (pywraplp.Solver.OPTIMAL, pywraplp.Solver.FEASIBLE): # pragma: no cover\n raise RuntimeError(\"Infeasible primal problem\")\n\n uprimal_uncertain = [bool(v.solution_value()) for v in vars]\n\n # Get solution to full problem by combining the partial and linear programming solutions\n u_primal_hat = u_hat_mean == 1\n 
u_primal_hat[idx_unselectedHyps] = uprimal_uncertain\n\n # Obtain primal cost\n primal_cost_hat = hypothesisCosts @ u_primal_hat\n\n return u_primal_hat, primal_cost_hat", "def getdualobj(self,whichsol_):\n dualobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getdualobj(self.__nativep,whichsol_,ctypes.byref(dualobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dualobj_ = dualobj_.value\n _dualobj_return_value = dualobj_\n return (_dualobj_return_value)", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def get_sol(self):", "def get_prime(self):\n return self.prime", "def get_primal(self, vartype, index_by_reactions=False):\n return get_primal(self, vartype, index_by_reactions)", "def _solve(self, solver):\n self.prob.solve(solver)\n if self.prob.status <= 0:\n raise Exception(\"Infeasible Solution.\")\n return {pid for pid, variable \n in self.player_vars.iteritems()\n if variable.varValue}", "def _fitness_model__(self, solution=None, minmax=0):\n return self.objective_func(solution) if minmax == 0 else 1.0 / (self.objective_func(solution) + self.EPSILON)", "def evaluate_objective(x):\n\n x_points_cartesian = x_to_cartesian(x)\n hull = ConvexHull(x_points_cartesian)\n\n # Return the negative value because the optimization is a minimization\n return -hull.volume", "def getinfeasiblesubproblem(self,whichsol_):\n inftask_ = ctypes.c_void_p()\n res = __library__.MSK_XX_getinfeasiblesubproblem(self.__nativep,whichsol_,ctypes.byref(inftask_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _inftask_return_value = Task(nativep = inftask_)\n return (_inftask_return_value)", "def objective(self, param):\n self.__init__(param, self.data)\n # return self.rmse() + self.penalty()\n return self.rmse() + self.penalty()", "def objective(self, x):\n pass", "def objective(self, x):\n pass", "def getqobjij(self,i_,j_): # 3\n res,resargs = self.__obj.getqobjij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _qoij_return_value = resargs\n return _qoij_return_value", "def getprosta(self,whichsol_):\n prosta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getprosta(self.__nativep,whichsol_,ctypes.byref(prosta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n return (_prosta_return_value)", "def primal_problem(\n states: list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n dim_x, _ = states[0].shape\n\n obj_func = []\n meas = []\n constraints = []\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n\n sys_list = list(range(1, dim, 2))\n\n # Unambiguous consists of k + 1 operators, where the outcome of the k+1^st corresponds to the\n # inconclusive answer.\n if dist_method == \"unambiguous\":\n for i in range(len(states) + 1):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(partial_transpose(meas[i], sys_list, dim_list) >> 0)\n\n for i, _ in enumerate(states):\n for j, _ in enumerate(states):\n if i != j:\n constraints.append(probs[j] * cvxpy.trace(states[j].conj().T @ meas[i]) == 0)\n\n # Minimize error of 
distinguishing via PPT measurements.\n elif dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(partial_transpose(meas[i], sys_list, dim_list) >> 0)\n\n for i, _ in enumerate(states):\n obj_func.append(probs[i] * cvxpy.trace(states[i].conj().T @ meas[i]))\n\n constraints.append(sum(meas) == np.identity(dim_x))\n\n objective = cvxpy.Maximize(sum(obj_func))\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n return sol_default" ]
[ "0.8639235", "0.6065909", "0.60444075", "0.59410185", "0.5837497", "0.5818431", "0.5802987", "0.57979184", "0.5757774", "0.5748593", "0.57292265", "0.5505549", "0.54718834", "0.54291403", "0.542609", "0.5413844", "0.532878", "0.532377", "0.53026396", "0.5288599", "0.5281505", "0.52596986", "0.523443", "0.5232584", "0.5195267", "0.5188838", "0.5188838", "0.5151937", "0.5147477", "0.5092868" ]
0.8099829
1
Obtains all the quadratic terms in a constraint. getqconk(self,k_,qcsubi_,qcsubj_,qcval_)
def getqconk(self,k_,qcsubi_,qcsubj_,qcval_): maxnumqcnz_ = self.getnumqconknz((k_)) numqcnz_ = ctypes.c_int64() _qcsubi_minlength = self.getnumqconknz((k_)) if self.getnumqconknz((k_)) > 0 and qcsubi_ is not None and len(qcsubi_) != self.getnumqconknz((k_)): raise ValueError("Array argument qcsubi is not long enough: Is %d, expected %d" % (len(qcsubi_),self.getnumqconknz((k_)))) if isinstance(qcsubi_,numpy.ndarray) and not qcsubi_.flags.writeable: raise ValueError("Argument qcsubi must be writable") if qcsubi_ is None: raise ValueError("Argument qcsubi may not be None") if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous: _qcsubi_copyarray = False _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif qcsubi_ is not None: _qcsubi_copyarray = True _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32)) _qcsubi_np_tmp[:] = qcsubi_ assert _qcsubi_np_tmp.flags.contiguous _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _qcsubi_copyarray = False _qcsubi_tmp = None _qcsubj_minlength = self.getnumqconknz((k_)) if self.getnumqconknz((k_)) > 0 and qcsubj_ is not None and len(qcsubj_) != self.getnumqconknz((k_)): raise ValueError("Array argument qcsubj is not long enough: Is %d, expected %d" % (len(qcsubj_),self.getnumqconknz((k_)))) if isinstance(qcsubj_,numpy.ndarray) and not qcsubj_.flags.writeable: raise ValueError("Argument qcsubj must be writable") if qcsubj_ is None: raise ValueError("Argument qcsubj may not be None") if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous: _qcsubj_copyarray = False _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif qcsubj_ is not None: _qcsubj_copyarray = True _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32)) _qcsubj_np_tmp[:] = qcsubj_ assert _qcsubj_np_tmp.flags.contiguous _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _qcsubj_copyarray = False _qcsubj_tmp = None _qcval_minlength = self.getnumqconknz((k_)) if self.getnumqconknz((k_)) > 0 and qcval_ is not None and len(qcval_) != self.getnumqconknz((k_)): raise ValueError("Array argument qcval is not long enough: Is %d, expected %d" % (len(qcval_),self.getnumqconknz((k_)))) if isinstance(qcval_,numpy.ndarray) and not qcval_.flags.writeable: raise ValueError("Argument qcval must be writable") if qcval_ is None: raise ValueError("Argument qcval may not be None") if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous: _qcval_copyarray = False _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif qcval_ is not None: _qcval_copyarray = True _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64)) _qcval_np_tmp[:] = qcval_ assert _qcval_np_tmp.flags.contiguous _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _qcval_copyarray = False _qcval_tmp = None qcsurp_ = ctypes.c_int64(_qcsubi_minlength) res = __library__.MSK_XX_getqconk64(self.__nativep,k_,maxnumqcnz_,ctypes.byref(qcsurp_),ctypes.byref(numqcnz_),_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) numqcnz_ = numqcnz_.value _numqcnz_return_value = numqcnz_ if _qcsubi_copyarray: qcsubi_[:] = _qcsubi_np_tmp if 
_qcsubj_copyarray: qcsubj_[:] = _qcsubj_np_tmp if _qcval_copyarray: qcval_[:] = _qcval_np_tmp return (_numqcnz_return_value)
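A hedged round-trip sketch for the getqconk call above, assuming the mosek Python package and NumPy; the constraint index, subscripts and coefficients are illustrative, and the output arrays are preallocated to getnumqconknz(k) as the wrapper requires:

    import numpy as np
    import mosek

    with mosek.Env() as env:
        with env.Task(0, 0) as task:
            task.appendvars(2)
            task.appendcons(1)
            # Put x0^2 + x1^2 into constraint 0 (MOSEK stores 0.5*x'Qx, lower triangle only).
            task.putqconk(0, [0, 1], [0, 1], [2.0, 2.0])
            nnz = task.getnumqconknz(0)
            qcsubi = np.zeros(nnz, dtype=np.int32)
            qcsubj = np.zeros(nnz, dtype=np.int32)
            qcval = np.zeros(nnz, dtype=np.float64)
            numqcnz = task.getqconk(0, qcsubi, qcsubj, qcval)
            print(numqcnz, qcsubi, qcsubj, qcval)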
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getqconk(self,k_,qcsubi,qcsubj,qcval): # 3\n maxnumqcnz_ = self.getnumqconknz((k_))\n if qcsubi is None: raise TypeError(\"Invalid type for argument qcsubi\")\n _copyback_qcsubi = False\n if qcsubi is None:\n qcsubi_ = None\n else:\n try:\n qcsubi_ = memoryview(qcsubi)\n except TypeError:\n try:\n _tmparr_qcsubi = array.array(\"i\",qcsubi)\n except TypeError:\n raise TypeError(\"Argument qcsubi has wrong type\")\n else:\n qcsubi_ = memoryview(_tmparr_qcsubi)\n _copyback_qcsubi = True\n else:\n if qcsubi_.format != \"i\":\n qcsubi_ = memoryview(array.array(\"i\",qcsubi))\n _copyback_qcsubi = True\n if qcsubi_ is not None and len(qcsubi_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubi has wrong length\")\n if qcsubj is None: raise TypeError(\"Invalid type for argument qcsubj\")\n _copyback_qcsubj = False\n if qcsubj is None:\n qcsubj_ = None\n else:\n try:\n qcsubj_ = memoryview(qcsubj)\n except TypeError:\n try:\n _tmparr_qcsubj = array.array(\"i\",qcsubj)\n except TypeError:\n raise TypeError(\"Argument qcsubj has wrong type\")\n else:\n qcsubj_ = memoryview(_tmparr_qcsubj)\n _copyback_qcsubj = True\n else:\n if qcsubj_.format != \"i\":\n qcsubj_ = memoryview(array.array(\"i\",qcsubj))\n _copyback_qcsubj = True\n if qcsubj_ is not None and len(qcsubj_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubj has wrong length\")\n if qcval is None: raise TypeError(\"Invalid type for argument qcval\")\n _copyback_qcval = False\n if qcval is None:\n qcval_ = None\n else:\n try:\n qcval_ = memoryview(qcval)\n except TypeError:\n try:\n _tmparr_qcval = array.array(\"d\",qcval)\n except TypeError:\n raise TypeError(\"Argument qcval has wrong type\")\n else:\n qcval_ = memoryview(_tmparr_qcval)\n _copyback_qcval = True\n else:\n if qcval_.format != \"d\":\n qcval_ = memoryview(array.array(\"d\",qcval))\n _copyback_qcval = True\n if qcval_ is not None and len(qcval_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcval has wrong length\")\n res,resargs = self.__obj.getqconk64(k_,maxnumqcnz_,len(qcsubi),qcsubi_,qcsubj_,qcval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n if _copyback_qcval:\n qcval[:] = _tmparr_qcval\n if _copyback_qcsubj:\n qcsubj[:] = _tmparr_qcsubj\n if _copyback_qcsubi:\n qcsubi[:] = _tmparr_qcsubi\n return _numqcnz_return_value", "def putqconk(self,k_,qcsubi_,qcsubj_,qcval_):\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi_)\n elif numqcnz_ != len(qcsubi_):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj_)\n elif numqcnz_ != len(qcsubj_):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval_)\n elif numqcnz_ != len(qcval_):\n raise IndexError(\"Inconsistent length of array qcval\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi cannot be None\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = 
ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj cannot be None\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n if qcval_ is None:\n raise ValueError(\"Argument qcval cannot be None\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n res = __library__.MSK_XX_putqconk(self.__nativep,k_,numqcnz_,_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putqconk(self,k_,qcsubi,qcsubj,qcval): # 3\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi)\n elif numqcnz_ != len(qcsubi):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj)\n elif numqcnz_ != len(qcsubj):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval)\n elif numqcnz_ != len(qcval):\n raise IndexError(\"Inconsistent length of array qcval\")\n if numqcnz_ is None: numqcnz_ = 0\n if qcsubi is None: raise TypeError(\"Invalid type for argument qcsubi\")\n if qcsubi is None:\n qcsubi_ = None\n else:\n try:\n qcsubi_ = memoryview(qcsubi)\n except TypeError:\n try:\n _tmparr_qcsubi = array.array(\"i\",qcsubi)\n except TypeError:\n raise TypeError(\"Argument qcsubi has wrong type\")\n else:\n qcsubi_ = memoryview(_tmparr_qcsubi)\n \n else:\n if qcsubi_.format != \"i\":\n qcsubi_ = memoryview(array.array(\"i\",qcsubi))\n \n if qcsubj is None: raise TypeError(\"Invalid type for argument qcsubj\")\n if qcsubj is None:\n qcsubj_ = None\n else:\n try:\n qcsubj_ = memoryview(qcsubj)\n except TypeError:\n try:\n _tmparr_qcsubj = array.array(\"i\",qcsubj)\n except TypeError:\n raise TypeError(\"Argument qcsubj has wrong type\")\n else:\n qcsubj_ = memoryview(_tmparr_qcsubj)\n \n else:\n if qcsubj_.format != \"i\":\n qcsubj_ = memoryview(array.array(\"i\",qcsubj))\n \n if qcval is None: raise TypeError(\"Invalid type for argument qcval\")\n if qcval is None:\n qcval_ = None\n else:\n try:\n qcval_ = memoryview(qcval)\n except TypeError:\n try:\n _tmparr_qcval = array.array(\"d\",qcval)\n except TypeError:\n raise TypeError(\"Argument qcval has wrong type\")\n else:\n qcval_ = memoryview(_tmparr_qcval)\n \n 
else:\n if qcval_.format != \"d\":\n qcval_ = memoryview(array.array(\"d\",qcval))\n \n res = self.__obj.putqconk(k_,numqcnz_,qcsubi_,qcsubj_,qcval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putqcon(self,qcsubk_,qcsubi_,qcsubj_,qcval_):\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi_)\n elif numqcnz_ != len(qcsubi_):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj_)\n elif numqcnz_ != len(qcsubj_):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval_)\n elif numqcnz_ != len(qcval_):\n raise IndexError(\"Inconsistent length of array qcval\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk cannot be None\")\n if qcsubk_ is None:\n raise ValueError(\"Argument qcsubk may not be None\")\n if isinstance(qcsubk_, numpy.ndarray) and qcsubk_.dtype is numpy.dtype(numpy.int32) and qcsubk_.flags.contiguous:\n _qcsubk_copyarray = False\n _qcsubk_tmp = ctypes.cast(qcsubk_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubk_ is not None:\n _qcsubk_copyarray = True\n _qcsubk_np_tmp = numpy.zeros(len(qcsubk_),numpy.dtype(numpy.int32))\n _qcsubk_np_tmp[:] = qcsubk_\n assert _qcsubk_np_tmp.flags.contiguous\n _qcsubk_tmp = ctypes.cast(_qcsubk_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubk_copyarray = False\n _qcsubk_tmp = None\n \n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi cannot be None\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj cannot be None\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n if qcval_ is None:\n raise ValueError(\"Argument qcval cannot be None\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = 
ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n res = __library__.MSK_XX_putqcon(self.__nativep,numqcnz_,_qcsubk_tmp,_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putqcon(self,qcsubk,qcsubi,qcsubj,qcval): # 3\n numqcnz_ = None\n if numqcnz_ is None:\n numqcnz_ = len(qcsubi)\n elif numqcnz_ != len(qcsubi):\n raise IndexError(\"Inconsistent length of array qcsubi\")\n if numqcnz_ is None:\n numqcnz_ = len(qcsubj)\n elif numqcnz_ != len(qcsubj):\n raise IndexError(\"Inconsistent length of array qcsubj\")\n if numqcnz_ is None:\n numqcnz_ = len(qcval)\n elif numqcnz_ != len(qcval):\n raise IndexError(\"Inconsistent length of array qcval\")\n if numqcnz_ is None: numqcnz_ = 0\n if qcsubk is None: raise TypeError(\"Invalid type for argument qcsubk\")\n if qcsubk is None:\n qcsubk_ = None\n else:\n try:\n qcsubk_ = memoryview(qcsubk)\n except TypeError:\n try:\n _tmparr_qcsubk = array.array(\"i\",qcsubk)\n except TypeError:\n raise TypeError(\"Argument qcsubk has wrong type\")\n else:\n qcsubk_ = memoryview(_tmparr_qcsubk)\n \n else:\n if qcsubk_.format != \"i\":\n qcsubk_ = memoryview(array.array(\"i\",qcsubk))\n \n if qcsubi is None: raise TypeError(\"Invalid type for argument qcsubi\")\n if qcsubi is None:\n qcsubi_ = None\n else:\n try:\n qcsubi_ = memoryview(qcsubi)\n except TypeError:\n try:\n _tmparr_qcsubi = array.array(\"i\",qcsubi)\n except TypeError:\n raise TypeError(\"Argument qcsubi has wrong type\")\n else:\n qcsubi_ = memoryview(_tmparr_qcsubi)\n \n else:\n if qcsubi_.format != \"i\":\n qcsubi_ = memoryview(array.array(\"i\",qcsubi))\n \n if qcsubj is None: raise TypeError(\"Invalid type for argument qcsubj\")\n if qcsubj is None:\n qcsubj_ = None\n else:\n try:\n qcsubj_ = memoryview(qcsubj)\n except TypeError:\n try:\n _tmparr_qcsubj = array.array(\"i\",qcsubj)\n except TypeError:\n raise TypeError(\"Argument qcsubj has wrong type\")\n else:\n qcsubj_ = memoryview(_tmparr_qcsubj)\n \n else:\n if qcsubj_.format != \"i\":\n qcsubj_ = memoryview(array.array(\"i\",qcsubj))\n \n if qcval is None: raise TypeError(\"Invalid type for argument qcval\")\n if qcval is None:\n qcval_ = None\n else:\n try:\n qcval_ = memoryview(qcval)\n except TypeError:\n try:\n _tmparr_qcval = array.array(\"d\",qcval)\n except TypeError:\n raise TypeError(\"Argument qcval has wrong type\")\n else:\n qcval_ = memoryview(_tmparr_qcval)\n \n else:\n if qcval_.format != \"d\":\n qcval_ = memoryview(array.array(\"d\",qcval))\n \n res = self.__obj.putqcon(numqcnz_,qcsubk_,qcsubi_,qcsubj_,qcval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def Q(self, k, x):\n g = np.asarray(self.g(k, x))\n Q = g @ g.T\n return Q", "def ikcon(self, T, q0=None):\n\n if not isinstance(T, SE3):\n T = SE3(T)\n\n trajn = len(T)\n\n try:\n if q0 is not None:\n q0 = getvector(q0, self.n, 'row')\n else:\n q0 = np.zeros((trajn, self.n))\n except ValueError:\n verifymatrix(q0, (trajn, self.n))\n\n # create output variables\n qstar = np.zeros((trajn, self.n))\n error = []\n exitflag = []\n\n omega = np.diag([1, 1, 1, 3 / self.reach])\n\n def cost(q, T, omega):\n return np.sum(\n (\n (np.linalg.pinv(T.A) @ self.fkine(q).A - np.eye(4)) @\n omega) ** 2\n )\n\n bnds = Bounds(self.qlim[0, :], self.qlim[1, :])\n\n for i in range(trajn):\n Ti = T[i]\n res = minimize(\n lambda q: cost(q, Ti, omega),\n q0[i, :], bounds=bnds, 
options={'gtol': 1e-6})\n qstar[i, :] = res.x\n error.append(res.fun)\n exitflag.append(res.success)\n\n if trajn > 1:\n return qstar, exitflag, error\n else:\n return qstar[0, :], exitflag[0], error[0]", "def KRC(self, ik, ipd, ipl, t):\n idx = ik - 1\n\n den1 = 1 - self.delta[idx] * self.coca.PK(ik, t)\n num1 = self.delta[idx] * self.thetak[idx]\n ins = num1 / den1\n\n for l in np.arange(0, self.L):\n pl = self.coca.PL(l, t)\n ins += ((self.thetal[l] * self.gamma[l][idx]) / (1 - pl))\n\n ans = ipd * np.exp(t * ipl) * ins\n\n return ans", "def getcone(self,k_,submem): # 3\n _copyback_submem = False\n if submem is None:\n submem_ = None\n else:\n try:\n submem_ = memoryview(submem)\n except TypeError:\n try:\n _tmparr_submem = array.array(\"i\",submem)\n except TypeError:\n raise TypeError(\"Argument submem has wrong type\")\n else:\n submem_ = memoryview(_tmparr_submem)\n _copyback_submem = True\n else:\n if submem_.format != \"i\":\n submem_ = memoryview(array.array(\"i\",submem))\n _copyback_submem = True\n if submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem has wrong length\")\n res,resargs = self.__obj.getcone(k_,submem_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value,_conepar_return_value,_nummem_return_value = resargs\n if _copyback_submem:\n submem[:] = _tmparr_submem\n _ct_return_value = conetype(_ct_return_value)\n return _ct_return_value,_conepar_return_value,_nummem_return_value", "def qmincon(self, q=None):\n\n def sumsqr(A):\n return np.sum(A**2)\n\n def cost(x, ub, lb, qm, N):\n return sumsqr(\n (2 * (N @ x + qm) - ub - lb) / (ub - lb))\n\n q = getmatrix(q, (None, self.n))\n\n qstar = np.zeros((q.shape[0], self.n))\n error = np.zeros(q.shape[0])\n success = np.zeros(q.shape[0])\n\n lb = self.qlim[0, :]\n ub = self.qlim[1, :]\n\n for k, qk in enumerate(q):\n\n J = self.jacobe(qk)\n\n N = null(J)\n\n x0 = np.zeros(N.shape[1])\n A = np.r_[N, -N]\n b = np.r_[ub - qk, qk - lb].reshape(A.shape[0],)\n\n con = LinearConstraint(A, -np.inf, b)\n\n res = minimize(\n lambda x: cost(x, ub, lb, qk, N),\n x0, constraints=con)\n\n qstar[k, :] = qk + N @ res.x\n error[k] = res.fun\n success[k] = res.success\n\n if q.shape[0] == 1:\n return qstar[0, :], success[0], error[0]\n else:\n return qstar, success, error", "def getConstraint(self):\n return self.gk, self.g_mink, self.g_maxk", "def add_cp_qe_RBM_terms(K, cons_pot_mesh, quad_geo_mesh):\n num_faces = cons_pot_mesh.get_faces().shape[0]\n x_c = quad_geo_mesh.get_centroid()\n w = quad_geo_mesh.get_w()\n A_m = quad_geo_mesh.get_A_m()\n S_D = quad_geo_mesh.get_surface_area()\n\n for face_num in range(num_faces):\n face_nodes = quad_geo_mesh.get_tri_nodes(face_num)\n face_hs = quad_geo_mesh.get_hs(face_num)\n def v_quad(_xi, _eta, _nodes):\n return np.identity(3)\n v_sub_mat = (1. 
/ S_D) * gq.int_over_tri_quad(v_quad, face_nodes, face_hs)\n def omega_quad(xi, eta, nodes):\n pos = geo.quadratic_interp(xi, eta, nodes)\n X = pos - x_c\n return np.einsum(\"lrs,s->lr\", geo.LC_3, X)\n tmp_omega = gq.int_over_tri_quad(\n omega_quad,\n face_nodes,\n face_hs,\n )\n tmp_arr = []\n for m in range(3):\n tmp_arr.append((1./ A_m[m]) * np.outer(w[m], np.einsum(\"l,ls\", w[m], tmp_omega)))\n tmp_arr = np.array(tmp_arr)\n tmp_omega_mat = np.sum(tmp_arr, axis=0)\n for src_num in range(num_faces):\n K[(3 * src_num):(3 * src_num + 3),\n (3 * face_num):(3 * face_num + 3)] += v_sub_mat\n src_center = cons_pot_mesh.get_node(src_num)\n X_0 = src_center - x_c\n omega_mat = np.einsum(\"ijk,js,k->is\", geo.LC_3, tmp_omega_mat, X_0)\n K[(3 * src_num):(3 * src_num + 3),\n (3 * face_num):(3 * face_num + 3)] += omega_mat", "def quadratic_dependence(dep_keys, ctfs=list(), force_zero=None, **kwargs):\n if force_zero is None and len(kwargs) == 0:\n return _dependence(\n f=np.polyval, n_params=3,\n dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,\n name='quadratic dependence', code='p2:{}', **kwargs\n )\n else:\n return _dependence(\n f=lambda p, x: np.polyval(np.concatenate((p, np.zeros(1))), x),\n n_params=2, dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,\n name='quadratic dependence', code='p2:{}', **kwargs\n )", "def evaluate_C_q(self, q):\n C_q_list = []\n\n\n GlobalVariables.q_i_dim[body_id]", "def getconeinfo(self,k_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = ctypes.c_int32()\n res = __library__.MSK_XX_getconeinfo(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def additional_equations(self, k):\n return", "def Qc(I, dT, a, b, c, d, e, f, g, h, i, k):\n x1 = I # I\n x2 = dT # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)", "def _compute_kreinParameters(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"q\"):\n return\n if not self._has(\"m\"):\n self.multiplicities(expand=expand, factor=factor,\n simplify=simplify)\n if not self._has(\"k\"):\n self.kTable(expand=expand, factor=factor,\n simplify=simplify)\n q = Array3D(self._.d + 1)\n self._compute_parameters(q, self._.Q, self._.k, integral=False,\n name=DUAL_PARAMETER, sym=DUAL_SYMBOL)\n self._.q = q", "def quadratic(a, b, c):\n A, B, C = K(a), K(b), K(c)\n AXX = mul_fns(A, mul_fns(X, X))\n BX = mul_fns(B, X)\n return add_fns(AXX, add_fns(BX, C))", "def get_poly_cc(n, k, t):\n assert (n > 0 and k >= 0), \"order and derivative must be positive.\"\n\n cc = np.ones(n)\n D = np.linspace(0, n-1, n)\n\n for i in range(n):\n for j in range(k):\n cc[i] = cc[i] * D[i]\n D[i] = D[i] - 1\n if D[i] == -1:\n D[i] = 0\n\n for i, c in enumerate(cc):\n cc[i] = c * np.power(t, D[i])\n\n return cc", "def conjunction(x, vx, k):\n t = np.sort(x / np.sqrt(np.maximum(vx, 1.e-15)))\n cjt = np.sum(t[:, :k], 1)\n return cjt", "def getcone(self,k_,submem_):\n ct_ = ctypes.c_int32()\n conepar_ = ctypes.c_double()\n nummem_ = 
ctypes.c_int32()\n _submem_minlength = self.getconeinfo((k_))[2]\n if self.getconeinfo((k_))[2] > 0 and submem_ is not None and len(submem_) != self.getconeinfo((k_))[2]:\n raise ValueError(\"Array argument submem is not long enough: Is %d, expected %d\" % (len(submem_),self.getconeinfo((k_))[2]))\n if isinstance(submem_,numpy.ndarray) and not submem_.flags.writeable:\n raise ValueError(\"Argument submem must be writable\")\n if isinstance(submem_, numpy.ndarray) and submem_.dtype is numpy.dtype(numpy.int32) and submem_.flags.contiguous:\n _submem_copyarray = False\n _submem_tmp = ctypes.cast(submem_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif submem_ is not None:\n _submem_copyarray = True\n _submem_np_tmp = numpy.zeros(len(submem_),numpy.dtype(numpy.int32))\n _submem_np_tmp[:] = submem_\n assert _submem_np_tmp.flags.contiguous\n _submem_tmp = ctypes.cast(_submem_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _submem_copyarray = False\n _submem_tmp = None\n \n res = __library__.MSK_XX_getcone(self.__nativep,k_,ctypes.byref(ct_),ctypes.byref(conepar_),ctypes.byref(nummem_),_submem_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _ct_return_value = conetype(ct_.value)\n conepar_ = conepar_.value\n _conepar_return_value = conepar_\n nummem_ = nummem_.value\n _nummem_return_value = nummem_\n if _submem_copyarray:\n submem_[:] = _submem_np_tmp\n return (_ct_return_value,_conepar_return_value,_nummem_return_value)", "def C(self, q, dq):\n # check for function in dictionary\n if self._C is None:\n self._C = self._calc_C()\n parameters = tuple(q) + tuple(dq)\n return np.array(self._C(*parameters), dtype='float32')", "def get_Cp(self, K0):\n # if(self.weight>=1.5):\n # raise ValueError,\" Error bounds only accurate for k<1.5! 
got k=%s\" % self.weight\n mp2 = mpmath.mpf(2)\n twominusk = mp2 - self._weight\n tmp = mpmath.mpf(len(self.multiplier().weil_module().D()))\n tmp0 = mpmath.sqrt(tmp) + mpmath.mpf(1)\n tmp1 = mpmath.pi() * mpmath.mpf(4)\n Cp1 = tmp1 * mpmath.sqrt(abs(K0))\n tmp1 = mpmath.power(tmp1, twominusk)\n tmp2 = mpmath.besseli(1 - self._weight, 1.0)\n tmp3 = mpmath.zeta(twominusk)\n if(K0 == 0):\n tmp4 = 1\n else:\n tmp4 = mpmath.power(K0, 1 - self._weight)\n Cp0 = tmp0 * tmp1 * tmp2 * tmp3 * tmp4\n return [Cp0, Cp1]", "def Qc_fit(x, a, b, c, d, e, f, g, h, i, k):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def __getitem__(self, k) :\n try :\n if not isinstance(k, tuple) :\n raise ValueError\n \n (ch, k) = k\n if k not in self.parent().monoid() :\n s = (ch, k)\n ch = None\n else :\n s = k\n except ValueError :\n s = k\n ch = None\n \n try :\n if not ch.parent() == self.parent().characters() :\n ch = None\n except AttributeError :\n ch = None\n \n if ch is None :\n ns = self.non_zero_components()\n if len(ns) == 0 :\n return 0\n elif len(ns) == 1 :\n ch = ns[0]\n else :\n raise ValueError, \"you must specify a character\"\n \n if not s in self.precision() :\n raise ValueError, \"%s out of bound\" % (s,)\n\n try :\n return self.__coefficients[ch][s]\n except KeyError :\n (rs, g) = self.parent()._reduction_function()(s)\n \n try :\n return self.parent()._character_eval_function()(g, ch) \\\n * self.parent()._apply_function()(g, self.__coefficients[ch][rs])\n except KeyError :\n return self.parent().coefficient_domain().zero_element()", "def eqconstr(x, problem):\n x, t_final = matrify(x, problem)\n return np.concatenate([problem['dynamics'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])" ]
[ "0.73472387", "0.66726524", "0.6591416", "0.6424358", "0.6321435", "0.56947607", "0.55449796", "0.53937316", "0.53766406", "0.5353066", "0.53088754", "0.5232903", "0.51952076", "0.5171041", "0.51661", "0.51115626", "0.5044338", "0.50178754", "0.50012517", "0.49925134", "0.49877807", "0.49838224", "0.4977614", "0.4971062", "0.49593672", "0.49563286", "0.49061775", "0.49024156", "0.48660251", "0.4834382" ]
0.72974324
1
Obtains all the quadratic terms in the objective. getqobj(self,qosubi_,qosubj_,qoval_)
def getqobj(self,qosubi_,qosubj_,qoval_): maxnumqonz_ = self.getnumqobjnz() numqonz_ = ctypes.c_int64() _qosubi_minlength = (maxnumqonz_) if (maxnumqonz_) > 0 and qosubi_ is not None and len(qosubi_) != (maxnumqonz_): raise ValueError("Array argument qosubi is not long enough: Is %d, expected %d" % (len(qosubi_),(maxnumqonz_))) if isinstance(qosubi_,numpy.ndarray) and not qosubi_.flags.writeable: raise ValueError("Argument qosubi must be writable") if qosubi_ is None: raise ValueError("Argument qosubi may not be None") if isinstance(qosubi_, numpy.ndarray) and qosubi_.dtype is numpy.dtype(numpy.int32) and qosubi_.flags.contiguous: _qosubi_copyarray = False _qosubi_tmp = ctypes.cast(qosubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif qosubi_ is not None: _qosubi_copyarray = True _qosubi_np_tmp = numpy.zeros(len(qosubi_),numpy.dtype(numpy.int32)) _qosubi_np_tmp[:] = qosubi_ assert _qosubi_np_tmp.flags.contiguous _qosubi_tmp = ctypes.cast(_qosubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _qosubi_copyarray = False _qosubi_tmp = None _qosubj_minlength = (maxnumqonz_) if (maxnumqonz_) > 0 and qosubj_ is not None and len(qosubj_) != (maxnumqonz_): raise ValueError("Array argument qosubj is not long enough: Is %d, expected %d" % (len(qosubj_),(maxnumqonz_))) if isinstance(qosubj_,numpy.ndarray) and not qosubj_.flags.writeable: raise ValueError("Argument qosubj must be writable") if qosubj_ is None: raise ValueError("Argument qosubj may not be None") if isinstance(qosubj_, numpy.ndarray) and qosubj_.dtype is numpy.dtype(numpy.int32) and qosubj_.flags.contiguous: _qosubj_copyarray = False _qosubj_tmp = ctypes.cast(qosubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) elif qosubj_ is not None: _qosubj_copyarray = True _qosubj_np_tmp = numpy.zeros(len(qosubj_),numpy.dtype(numpy.int32)) _qosubj_np_tmp[:] = qosubj_ assert _qosubj_np_tmp.flags.contiguous _qosubj_tmp = ctypes.cast(_qosubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32)) else: _qosubj_copyarray = False _qosubj_tmp = None _qoval_minlength = (maxnumqonz_) if (maxnumqonz_) > 0 and qoval_ is not None and len(qoval_) != (maxnumqonz_): raise ValueError("Array argument qoval is not long enough: Is %d, expected %d" % (len(qoval_),(maxnumqonz_))) if isinstance(qoval_,numpy.ndarray) and not qoval_.flags.writeable: raise ValueError("Argument qoval must be writable") if qoval_ is None: raise ValueError("Argument qoval may not be None") if isinstance(qoval_, numpy.ndarray) and qoval_.dtype is numpy.dtype(numpy.float64) and qoval_.flags.contiguous: _qoval_copyarray = False _qoval_tmp = ctypes.cast(qoval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif qoval_ is not None: _qoval_copyarray = True _qoval_np_tmp = numpy.zeros(len(qoval_),numpy.dtype(numpy.float64)) _qoval_np_tmp[:] = qoval_ assert _qoval_np_tmp.flags.contiguous _qoval_tmp = ctypes.cast(_qoval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _qoval_copyarray = False _qoval_tmp = None qosurp_ = ctypes.c_int64(_qosubi_minlength) res = __library__.MSK_XX_getqobj64(self.__nativep,maxnumqonz_,ctypes.byref(qosurp_),ctypes.byref(numqonz_),_qosubi_tmp,_qosubj_tmp,_qoval_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) numqonz_ = numqonz_.value _numqonz_return_value = numqonz_ if _qosubi_copyarray: qosubi_[:] = _qosubi_np_tmp if _qosubj_copyarray: qosubj_[:] = _qosubj_np_tmp if _qoval_copyarray: qoval_[:] = _qoval_np_tmp return (_numqonz_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getqobj(self,qosubi,qosubj,qoval): # 3\n maxnumqonz_ = self.getnumqobjnz()\n if qosubi is None: raise TypeError(\"Invalid type for argument qosubi\")\n _copyback_qosubi = False\n if qosubi is None:\n qosubi_ = None\n else:\n try:\n qosubi_ = memoryview(qosubi)\n except TypeError:\n try:\n _tmparr_qosubi = array.array(\"i\",qosubi)\n except TypeError:\n raise TypeError(\"Argument qosubi has wrong type\")\n else:\n qosubi_ = memoryview(_tmparr_qosubi)\n _copyback_qosubi = True\n else:\n if qosubi_.format != \"i\":\n qosubi_ = memoryview(array.array(\"i\",qosubi))\n _copyback_qosubi = True\n if qosubi_ is not None and len(qosubi_) != (maxnumqonz_):\n raise ValueError(\"Array argument qosubi has wrong length\")\n if qosubj is None: raise TypeError(\"Invalid type for argument qosubj\")\n _copyback_qosubj = False\n if qosubj is None:\n qosubj_ = None\n else:\n try:\n qosubj_ = memoryview(qosubj)\n except TypeError:\n try:\n _tmparr_qosubj = array.array(\"i\",qosubj)\n except TypeError:\n raise TypeError(\"Argument qosubj has wrong type\")\n else:\n qosubj_ = memoryview(_tmparr_qosubj)\n _copyback_qosubj = True\n else:\n if qosubj_.format != \"i\":\n qosubj_ = memoryview(array.array(\"i\",qosubj))\n _copyback_qosubj = True\n if qosubj_ is not None and len(qosubj_) != (maxnumqonz_):\n raise ValueError(\"Array argument qosubj has wrong length\")\n if qoval is None: raise TypeError(\"Invalid type for argument qoval\")\n _copyback_qoval = False\n if qoval is None:\n qoval_ = None\n else:\n try:\n qoval_ = memoryview(qoval)\n except TypeError:\n try:\n _tmparr_qoval = array.array(\"d\",qoval)\n except TypeError:\n raise TypeError(\"Argument qoval has wrong type\")\n else:\n qoval_ = memoryview(_tmparr_qoval)\n _copyback_qoval = True\n else:\n if qoval_.format != \"d\":\n qoval_ = memoryview(array.array(\"d\",qoval))\n _copyback_qoval = True\n if qoval_ is not None and len(qoval_) != (maxnumqonz_):\n raise ValueError(\"Array argument qoval has wrong length\")\n res,resargs = self.__obj.getqobj64(maxnumqonz_,len(qosubi),qosubi_,qosubj_,qoval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqonz_return_value = resargs\n if _copyback_qoval:\n qoval[:] = _tmparr_qoval\n if _copyback_qosubj:\n qosubj[:] = _tmparr_qosubj\n if _copyback_qosubi:\n qosubi[:] = _tmparr_qosubi\n return _numqonz_return_value", "def putqobj(self,qosubi,qosubj,qoval): # 3\n numqonz_ = None\n if numqonz_ is None:\n numqonz_ = len(qosubi)\n elif numqonz_ != len(qosubi):\n raise IndexError(\"Inconsistent length of array qosubi\")\n if numqonz_ is None:\n numqonz_ = len(qosubj)\n elif numqonz_ != len(qosubj):\n raise IndexError(\"Inconsistent length of array qosubj\")\n if numqonz_ is None:\n numqonz_ = len(qoval)\n elif numqonz_ != len(qoval):\n raise IndexError(\"Inconsistent length of array qoval\")\n if numqonz_ is None: numqonz_ = 0\n if qosubi is None: raise TypeError(\"Invalid type for argument qosubi\")\n if qosubi is None:\n qosubi_ = None\n else:\n try:\n qosubi_ = memoryview(qosubi)\n except TypeError:\n try:\n _tmparr_qosubi = array.array(\"i\",qosubi)\n except TypeError:\n raise TypeError(\"Argument qosubi has wrong type\")\n else:\n qosubi_ = memoryview(_tmparr_qosubi)\n \n else:\n if qosubi_.format != \"i\":\n qosubi_ = memoryview(array.array(\"i\",qosubi))\n \n if qosubj is None: raise TypeError(\"Invalid type for argument qosubj\")\n if qosubj is None:\n qosubj_ = None\n else:\n try:\n qosubj_ = memoryview(qosubj)\n except TypeError:\n try:\n _tmparr_qosubj = 
array.array(\"i\",qosubj)\n except TypeError:\n raise TypeError(\"Argument qosubj has wrong type\")\n else:\n qosubj_ = memoryview(_tmparr_qosubj)\n \n else:\n if qosubj_.format != \"i\":\n qosubj_ = memoryview(array.array(\"i\",qosubj))\n \n if qoval is None: raise TypeError(\"Invalid type for argument qoval\")\n if qoval is None:\n qoval_ = None\n else:\n try:\n qoval_ = memoryview(qoval)\n except TypeError:\n try:\n _tmparr_qoval = array.array(\"d\",qoval)\n except TypeError:\n raise TypeError(\"Argument qoval has wrong type\")\n else:\n qoval_ = memoryview(_tmparr_qoval)\n \n else:\n if qoval_.format != \"d\":\n qoval_ = memoryview(array.array(\"d\",qoval))\n \n res = self.__obj.putqobj(numqonz_,qosubi_,qosubj_,qoval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putqobj(self,qosubi_,qosubj_,qoval_):\n numqonz_ = None\n if numqonz_ is None:\n numqonz_ = len(qosubi_)\n elif numqonz_ != len(qosubi_):\n raise IndexError(\"Inconsistent length of array qosubi\")\n if numqonz_ is None:\n numqonz_ = len(qosubj_)\n elif numqonz_ != len(qosubj_):\n raise IndexError(\"Inconsistent length of array qosubj\")\n if numqonz_ is None:\n numqonz_ = len(qoval_)\n elif numqonz_ != len(qoval_):\n raise IndexError(\"Inconsistent length of array qoval\")\n if qosubi_ is None:\n raise ValueError(\"Argument qosubi cannot be None\")\n if qosubi_ is None:\n raise ValueError(\"Argument qosubi may not be None\")\n if isinstance(qosubi_, numpy.ndarray) and qosubi_.dtype is numpy.dtype(numpy.int32) and qosubi_.flags.contiguous:\n _qosubi_copyarray = False\n _qosubi_tmp = ctypes.cast(qosubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qosubi_ is not None:\n _qosubi_copyarray = True\n _qosubi_np_tmp = numpy.zeros(len(qosubi_),numpy.dtype(numpy.int32))\n _qosubi_np_tmp[:] = qosubi_\n assert _qosubi_np_tmp.flags.contiguous\n _qosubi_tmp = ctypes.cast(_qosubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qosubi_copyarray = False\n _qosubi_tmp = None\n \n if qosubj_ is None:\n raise ValueError(\"Argument qosubj cannot be None\")\n if qosubj_ is None:\n raise ValueError(\"Argument qosubj may not be None\")\n if isinstance(qosubj_, numpy.ndarray) and qosubj_.dtype is numpy.dtype(numpy.int32) and qosubj_.flags.contiguous:\n _qosubj_copyarray = False\n _qosubj_tmp = ctypes.cast(qosubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qosubj_ is not None:\n _qosubj_copyarray = True\n _qosubj_np_tmp = numpy.zeros(len(qosubj_),numpy.dtype(numpy.int32))\n _qosubj_np_tmp[:] = qosubj_\n assert _qosubj_np_tmp.flags.contiguous\n _qosubj_tmp = ctypes.cast(_qosubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qosubj_copyarray = False\n _qosubj_tmp = None\n \n if qoval_ is None:\n raise ValueError(\"Argument qoval cannot be None\")\n if qoval_ is None:\n raise ValueError(\"Argument qoval may not be None\")\n if isinstance(qoval_, numpy.ndarray) and qoval_.dtype is numpy.dtype(numpy.float64) and qoval_.flags.contiguous:\n _qoval_copyarray = False\n _qoval_tmp = ctypes.cast(qoval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qoval_ is not None:\n _qoval_copyarray = True\n _qoval_np_tmp = numpy.zeros(len(qoval_),numpy.dtype(numpy.float64))\n _qoval_np_tmp[:] = qoval_\n assert _qoval_np_tmp.flags.contiguous\n _qoval_tmp = ctypes.cast(_qoval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qoval_copyarray = False\n _qoval_tmp = None\n \n res = 
__library__.MSK_XX_putqobj(self.__nativep,numqonz_,_qosubi_tmp,_qosubj_tmp,_qoval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getqobjij(self,i_,j_):\n qoij_ = ctypes.c_double()\n res = __library__.MSK_XX_getqobjij(self.__nativep,i_,j_,ctypes.byref(qoij_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n qoij_ = qoij_.value\n _qoij_return_value = qoij_\n return (_qoij_return_value)", "def getqobjij(self,i_,j_): # 3\n res,resargs = self.__obj.getqobjij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _qoij_return_value = resargs\n return _qoij_return_value", "def q(self):\n return self.model.gmmobjective(self.params, self.weights)", "def _getq(self, q=None):\n if q is None:\n return self.q\n elif isvector(q, self.n):\n return getvector(q, self.n)\n else:\n return getmatrix(q, (None, self.n))", "def getqconk(self,k_,qcsubi,qcsubj,qcval): # 3\n maxnumqcnz_ = self.getnumqconknz((k_))\n if qcsubi is None: raise TypeError(\"Invalid type for argument qcsubi\")\n _copyback_qcsubi = False\n if qcsubi is None:\n qcsubi_ = None\n else:\n try:\n qcsubi_ = memoryview(qcsubi)\n except TypeError:\n try:\n _tmparr_qcsubi = array.array(\"i\",qcsubi)\n except TypeError:\n raise TypeError(\"Argument qcsubi has wrong type\")\n else:\n qcsubi_ = memoryview(_tmparr_qcsubi)\n _copyback_qcsubi = True\n else:\n if qcsubi_.format != \"i\":\n qcsubi_ = memoryview(array.array(\"i\",qcsubi))\n _copyback_qcsubi = True\n if qcsubi_ is not None and len(qcsubi_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubi has wrong length\")\n if qcsubj is None: raise TypeError(\"Invalid type for argument qcsubj\")\n _copyback_qcsubj = False\n if qcsubj is None:\n qcsubj_ = None\n else:\n try:\n qcsubj_ = memoryview(qcsubj)\n except TypeError:\n try:\n _tmparr_qcsubj = array.array(\"i\",qcsubj)\n except TypeError:\n raise TypeError(\"Argument qcsubj has wrong type\")\n else:\n qcsubj_ = memoryview(_tmparr_qcsubj)\n _copyback_qcsubj = True\n else:\n if qcsubj_.format != \"i\":\n qcsubj_ = memoryview(array.array(\"i\",qcsubj))\n _copyback_qcsubj = True\n if qcsubj_ is not None and len(qcsubj_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubj has wrong length\")\n if qcval is None: raise TypeError(\"Invalid type for argument qcval\")\n _copyback_qcval = False\n if qcval is None:\n qcval_ = None\n else:\n try:\n qcval_ = memoryview(qcval)\n except TypeError:\n try:\n _tmparr_qcval = array.array(\"d\",qcval)\n except TypeError:\n raise TypeError(\"Argument qcval has wrong type\")\n else:\n qcval_ = memoryview(_tmparr_qcval)\n _copyback_qcval = True\n else:\n if qcval_.format != \"d\":\n qcval_ = memoryview(array.array(\"d\",qcval))\n _copyback_qcval = True\n if qcval_ is not None and len(qcval_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcval has wrong length\")\n res,resargs = self.__obj.getqconk64(k_,maxnumqcnz_,len(qcsubi),qcsubi_,qcsubj_,qcval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numqcnz_return_value = resargs\n if _copyback_qcval:\n qcval[:] = _tmparr_qcval\n if _copyback_qcsubj:\n qcsubj[:] = _tmparr_qcsubj\n if _copyback_qcsubi:\n qcsubi[:] = _tmparr_qcsubi\n return _numqcnz_return_value", "def jval(self):\n return self.q * self.model.nobs_moms", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def get_q(self):\n for state in self.vibresults:\n dFdG = []\n j = 
0\n for i in range(3*len(self.indices)):\n if (i+1)%3 == 0:\n # a z-component\n try:\n differential = self.dFdG[state][j]\n except IndexError:\n print('Missing data!')\n continue\n dFdG.append([0,0,differential[-1]])\n j += 1\n else:\n dFdG.append([0, 0, 0])\n dFdG = np.array(dFdG)\n mu_axes = dFdG.T[-1]\n # now dot product with the different modes available\n for index, mode in enumerate(self.modes):\n try:\n q = np.dot(mu_axes, mode)\n except ValueError:\n continue\n self.q.setdefault(state,{})[index] = q", "def get_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def get_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def get_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def getprimalobj(self,whichsol_):\n primalobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getprimalobj(self.__nativep,whichsol_,ctypes.byref(primalobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n primalobj_ = primalobj_.value\n _primalobj_return_value = primalobj_\n return (_primalobj_return_value)", "def putqobjij(self,i_,j_,qoij_): # 3\n res = self.__obj.putqobjij(i_,j_,qoij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putqobjij(self,i_,j_,qoij_):\n res = __library__.MSK_XX_putqobjij(self.__nativep,i_,j_,qoij_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _Q(self, x, y):\n\n # Calculate the [Q] coefficient matrix\n Q = array([[0, 0, 0, -2, 0, 0, -6*x, -2*y, 0, 0, -6*x*y, 0],\n [0, 0, 0, 0, 0, -2, 0, 0, -2*x, -6*y, 0, -6*x*y],\n [0, 0, 0, 0, -2, 0, 0, -4*x, -4*y, 0, -6*x**2, -6*y**2]])\n \n # Return the [Q] coefficient matrix\n return Q", "def q_criterion(a):\n print(\"Detection method: Q criterion\")\n Q = np.zeros((a.u.shape[0], a.u.shape[1]))\n print(a.u.shape[0], a.u.shape[1])\n #print(Q.shape)\n for i in range(a.u.shape[0]):\n for j in range(a.u.shape[1]):\n Q[i, j] = -0.5*(a.derivative['dudx'][i, j]**2 + a.derivative['dvdy'][i, j]**2) \\\n - a.derivative['dudy'][i, j] * a.derivative['dvdx'][i, j]\n return Q", "def getqconk(self,k_,qcsubi_,qcsubj_,qcval_):\n maxnumqcnz_ = self.getnumqconknz((k_))\n numqcnz_ = ctypes.c_int64()\n _qcsubi_minlength = self.getnumqconknz((k_))\n if self.getnumqconknz((k_)) > 0 and qcsubi_ is not None and len(qcsubi_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubi is not long enough: Is %d, expected %d\" % (len(qcsubi_),self.getnumqconknz((k_))))\n if isinstance(qcsubi_,numpy.ndarray) and not qcsubi_.flags.writeable:\n raise ValueError(\"Argument qcsubi must be writable\")\n if qcsubi_ is None:\n raise ValueError(\"Argument qcsubi may not be None\")\n if isinstance(qcsubi_, numpy.ndarray) and qcsubi_.dtype is numpy.dtype(numpy.int32) and qcsubi_.flags.contiguous:\n _qcsubi_copyarray = False\n _qcsubi_tmp = ctypes.cast(qcsubi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubi_ is not None:\n _qcsubi_copyarray = True\n _qcsubi_np_tmp = numpy.zeros(len(qcsubi_),numpy.dtype(numpy.int32))\n _qcsubi_np_tmp[:] = qcsubi_\n assert _qcsubi_np_tmp.flags.contiguous\n _qcsubi_tmp = ctypes.cast(_qcsubi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubi_copyarray = False\n _qcsubi_tmp = None\n \n _qcsubj_minlength = self.getnumqconknz((k_))\n if self.getnumqconknz((k_)) > 0 and qcsubj_ is not None and len(qcsubj_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcsubj is not long enough: Is %d, 
expected %d\" % (len(qcsubj_),self.getnumqconknz((k_))))\n if isinstance(qcsubj_,numpy.ndarray) and not qcsubj_.flags.writeable:\n raise ValueError(\"Argument qcsubj must be writable\")\n if qcsubj_ is None:\n raise ValueError(\"Argument qcsubj may not be None\")\n if isinstance(qcsubj_, numpy.ndarray) and qcsubj_.dtype is numpy.dtype(numpy.int32) and qcsubj_.flags.contiguous:\n _qcsubj_copyarray = False\n _qcsubj_tmp = ctypes.cast(qcsubj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif qcsubj_ is not None:\n _qcsubj_copyarray = True\n _qcsubj_np_tmp = numpy.zeros(len(qcsubj_),numpy.dtype(numpy.int32))\n _qcsubj_np_tmp[:] = qcsubj_\n assert _qcsubj_np_tmp.flags.contiguous\n _qcsubj_tmp = ctypes.cast(_qcsubj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _qcsubj_copyarray = False\n _qcsubj_tmp = None\n \n _qcval_minlength = self.getnumqconknz((k_))\n if self.getnumqconknz((k_)) > 0 and qcval_ is not None and len(qcval_) != self.getnumqconknz((k_)):\n raise ValueError(\"Array argument qcval is not long enough: Is %d, expected %d\" % (len(qcval_),self.getnumqconknz((k_))))\n if isinstance(qcval_,numpy.ndarray) and not qcval_.flags.writeable:\n raise ValueError(\"Argument qcval must be writable\")\n if qcval_ is None:\n raise ValueError(\"Argument qcval may not be None\")\n if isinstance(qcval_, numpy.ndarray) and qcval_.dtype is numpy.dtype(numpy.float64) and qcval_.flags.contiguous:\n _qcval_copyarray = False\n _qcval_tmp = ctypes.cast(qcval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif qcval_ is not None:\n _qcval_copyarray = True\n _qcval_np_tmp = numpy.zeros(len(qcval_),numpy.dtype(numpy.float64))\n _qcval_np_tmp[:] = qcval_\n assert _qcval_np_tmp.flags.contiguous\n _qcval_tmp = ctypes.cast(_qcval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _qcval_copyarray = False\n _qcval_tmp = None\n \n qcsurp_ = ctypes.c_int64(_qcsubi_minlength)\n res = __library__.MSK_XX_getqconk64(self.__nativep,k_,maxnumqcnz_,ctypes.byref(qcsurp_),ctypes.byref(numqcnz_),_qcsubi_tmp,_qcsubj_tmp,_qcval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numqcnz_ = numqcnz_.value\n _numqcnz_return_value = numqcnz_\n if _qcsubi_copyarray:\n qcsubi_[:] = _qcsubi_np_tmp\n if _qcsubj_copyarray:\n qcsubj_[:] = _qcsubj_np_tmp\n if _qcval_copyarray:\n qcval_[:] = _qcval_np_tmp\n return (_numqcnz_return_value)", "def __float__(self):\n return self.q[0]", "def evaluate_C_q(self, q):\n C_q_list = []\n\n\n GlobalVariables.q_i_dim[body_id]", "def test_solve_quadratic(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.fixed = False\n iden2.x.fixed = False\n iden3.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 0)\n self.assertAlmostEqual(iden1.x.val, 1)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 3)", "def objective(self):\n pass", "def getprimalobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprimalobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _primalobj_return_value = resargs\n return _primalobj_return_value", "def sqrty():\n return 
Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])", "def get_requisite_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def qsolve(self, options=''):\n for x in self._rhs:\n if x != 0:\n raise NotImplementedError, \"qsolve is currently only implemented for homogeneous systems (i.e., with rhs=0)\"\n out, err = self.call_4ti2('qsolve', options=options)\n qhom = ExtremalRays(self._read_file('qhom'), self)\n qfree = self._read_file('qfree')\n return (qhom, qfree)", "def generate_VQE_args(self):\n Hamiltonian = self.generate_ising_hamiltonian(self.graphcover)\n Operator = self.get_qubitops(Hamiltonian, self.verbose)\n\n var_form = RYRZ(num_qubits=Hamiltonian.shape[0], \n depth=5, entanglement=\"linear\", \n initial_state=None)\n opt = SPSA(max_trials=self.niter)\n print(\"Operator with number of qubits: {}\".format(Operator.num_qubits))\n\n return Operator, var_form, opt", "def __complex__(self):\n return complex(self.q[0], self.q[1])" ]
[ "0.7446692", "0.6420837", "0.6285769", "0.62362957", "0.5942086", "0.5876235", "0.5872058", "0.566999", "0.5666952", "0.5595695", "0.5467557", "0.5461396", "0.5461396", "0.5461396", "0.5456688", "0.545507", "0.5411939", "0.53708935", "0.53272456", "0.5319744", "0.5307098", "0.52718407", "0.52589583", "0.5186626", "0.5170212", "0.5148856", "0.51443225", "0.51304257", "0.5129921", "0.5128437" ]
0.7120849
1
Obtains one coefficient from the quadratic term of the objective. getqobjij(self,i_,j_)
def getqobjij(self,i_,j_): qoij_ = ctypes.c_double() res = __library__.MSK_XX_getqobjij(self.__nativep,i_,j_,ctypes.byref(qoij_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) qoij_ = qoij_.value _qoij_return_value = qoij_ return (_qoij_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getqobjij(self,i_,j_): # 3\n res,resargs = self.__obj.getqobjij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _qoij_return_value = resargs\n return _qoij_return_value", "def getCoefficient(self):\n return _libsbml.FluxObjective_getCoefficient(self)", "def _qij_0(i: int, j: int):\n ia = i * 2 + 0\n ib = i * 2 + 1\n ja = j * 2 + 0\n jb = j * 2 + 1\n term1 = FermionOperator(((ja, 0), (ib, 0)), 1.0)\n term2 = FermionOperator(((ia, 0), (jb, 0)), 1.0)\n return numpy.sqrt(0.5) * (term1 - term2)", "def putqobjij(self,i_,j_,qoij_): # 3\n res = self.__obj.putqobjij(i_,j_,qoij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def jval(self):\n return self.q * self.model.nobs_moms", "def getcj(self,j_):\n cj_ = ctypes.c_double()\n res = __library__.MSK_XX_getcj(self.__nativep,j_,ctypes.byref(cj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n cj_ = cj_.value\n _cj_return_value = cj_\n return (_cj_return_value)", "def putqobjij(self,i_,j_,qoij_):\n res = __library__.MSK_XX_putqobjij(self.__nativep,i_,j_,qoij_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getcj(self,j_): # 3\n res,resargs = self.__obj.getcj(j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _cj_return_value = resargs\n return _cj_return_value", "def get_jacobian_spatial(self, qs=None) -> np.ndarray:\n if qs is None:\n qs = self.get_current_joint_position()\n return self.robot.jacob0(qs)", "def coeff(self):\n return self._coeff", "def objective(self):\n return self._objective", "def getaij(self,i_,j_):\n aij_ = ctypes.c_double()\n res = __library__.MSK_XX_getaij(self.__nativep,i_,j_,ctypes.byref(aij_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n aij_ = aij_.value\n _aij_return_value = aij_\n return (_aij_return_value)", "def __getitem__(self, s) :\n try :\n return self.coefficients()[s]\n except KeyError :\n return self.parent().coefficient_domain().zero_element()", "def _qij_minus(i: int, j: int):\n ib = i * 2 + 1\n jb = j * 2 + 1\n term = FermionOperator(((jb, 0), (ib, 0)), 1.0)\n return term", "def objective(self):\n pass", "def _Q(self, x, y):\n\n # Calculate the [Q] coefficient matrix\n Q = array([[0, 0, 0, -2, 0, 0, -6*x, -2*y, 0, 0, -6*x*y, 0],\n [0, 0, 0, 0, 0, -2, 0, 0, -2*x, -6*y, 0, -6*x*y],\n [0, 0, 0, 0, -2, 0, 0, -4*x, -4*y, 0, -6*x**2, -6*y**2]])\n \n # Return the [Q] coefficient matrix\n return Q", "def get_coefficient(self, line, column):\n return self.coefficients[line][column]", "def qmincon(self, q=None):\n\n def sumsqr(A):\n return np.sum(A**2)\n\n def cost(x, ub, lb, qm, N):\n return sumsqr(\n (2 * (N @ x + qm) - ub - lb) / (ub - lb))\n\n q = getmatrix(q, (None, self.n))\n\n qstar = np.zeros((q.shape[0], self.n))\n error = np.zeros(q.shape[0])\n success = np.zeros(q.shape[0])\n\n lb = self.qlim[0, :]\n ub = self.qlim[1, :]\n\n for k, qk in enumerate(q):\n\n J = self.jacobe(qk)\n\n N = null(J)\n\n x0 = np.zeros(N.shape[1])\n A = np.r_[N, -N]\n b = np.r_[ub - qk, qk - lb].reshape(A.shape[0],)\n\n con = LinearConstraint(A, -np.inf, b)\n\n res = minimize(\n lambda x: cost(x, ub, lb, qk, N),\n x0, constraints=con)\n\n qstar[k, :] = qk + N @ res.x\n error[k] = res.fun\n success[k] = res.success\n\n if q.shape[0] == 1:\n return qstar[0, :], success[0], error[0]\n else:\n return qstar, success, error", "def objective(self, x):\n pass", "def objective(self, x):\n 
pass", "def getObjective(self, *args):\n return _libsbml.FbcModelPlugin_getObjective(self, *args)", "def getaij(self,i_,j_): # 3\n res,resargs = self.__obj.getaij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _aij_return_value = resargs\n return _aij_return_value", "def coefficient(self) -> float:\n ...", "def conj(self, o): \n return (o.inv()) * self * o", "def getprimalobj(self,whichsol_):\n primalobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getprimalobj(self.__nativep,whichsol_,ctypes.byref(primalobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n primalobj_ = primalobj_.value\n _primalobj_return_value = primalobj_\n return (_primalobj_return_value)", "def GetJ(self, *args):\n return _table.Table_GetJ(self, *args)", "def J(self):\n return self._J", "def J(self, name, q, x=None):\n\n x = self.x_zeros if x is None else x\n funcname = name + '[0,0,0]' if np.allclose(x, 0) else name\n # check for function in dictionary\n if self._J.get(funcname, None) is None:\n self._J[funcname] = self._calc_J(name=name, x=x)\n parameters = tuple(q) + tuple(x)\n return np.array(self._J[funcname](*parameters), dtype='float32')", "def objective(self, x):\n rvs = frozenset(map(frozenset, self._rvs))\n joint = self.construct_joint(x)\n joint = joint.sum(axis=self._others, keepdims=True)\n crv = joint.sum(axis=tuple(flatten(rvs)))\n\n H_crv = h(crv.ravel())\n H = h(joint.ravel()) - H_crv\n\n def I_P(part):\n margs = [ joint.sum(axis=tuple(flatten(rvs - p))) for p in part ]\n a = sum(h(marg.ravel()) - H_crv for marg in margs)\n return (a - H)/(len(part) - 1)\n\n parts = [p for p in partitions(map(frozenset, rvs)) if len(p) > 1]\n\n caekl = min(I_P(p) for p in parts)\n\n return caekl", "def acoeff(self):\n return np.dot(self.mmi,np.dot(self.mmatrix.T,self.bvec))" ]
[ "0.7394904", "0.6306659", "0.6125218", "0.6117796", "0.59879345", "0.5969218", "0.5932629", "0.5920546", "0.59181994", "0.5905317", "0.58951527", "0.58876437", "0.5873334", "0.5761676", "0.57416713", "0.5738999", "0.57350576", "0.57144594", "0.570241", "0.570241", "0.5692089", "0.56871337", "0.56717193", "0.5657627", "0.5643077", "0.56418794", "0.5641621", "0.56316835", "0.56179273", "0.5559242" ]
0.73838377
1
Obtains the complete solution. getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)
def getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_): prosta_ = ctypes.c_int32() solsta_ = ctypes.c_int32() _skc_minlength = self.getnumcon() if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon(): raise ValueError("Array argument skc is not long enough: Is %d, expected %d" % (len(skc_),self.getnumcon())) if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable: raise ValueError("Argument skc must be writable") if skc_ is not None: _skc_tmp = (ctypes.c_int32 * len(skc_))() else: _skc_tmp = None _skx_minlength = self.getnumvar() if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar(): raise ValueError("Array argument skx is not long enough: Is %d, expected %d" % (len(skx_),self.getnumvar())) if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable: raise ValueError("Argument skx must be writable") if skx_ is not None: _skx_tmp = (ctypes.c_int32 * len(skx_))() else: _skx_tmp = None _skn_minlength = self.getnumcone() if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone(): raise ValueError("Array argument skn is not long enough: Is %d, expected %d" % (len(skn_),self.getnumcone())) if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable: raise ValueError("Argument skn must be writable") if skn_ is not None: _skn_tmp = (ctypes.c_int32 * len(skn_))() else: _skn_tmp = None _xc_minlength = self.getnumcon() if self.getnumcon() > 0 and xc_ is not None and len(xc_) != self.getnumcon(): raise ValueError("Array argument xc is not long enough: Is %d, expected %d" % (len(xc_),self.getnumcon())) if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable: raise ValueError("Argument xc must be writable") if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous: _xc_copyarray = False _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif xc_ is not None: _xc_copyarray = True _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64)) _xc_np_tmp[:] = xc_ assert _xc_np_tmp.flags.contiguous _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _xc_copyarray = False _xc_tmp = None _xx_minlength = self.getnumvar() if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar(): raise ValueError("Array argument xx is not long enough: Is %d, expected %d" % (len(xx_),self.getnumvar())) if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable: raise ValueError("Argument xx must be writable") if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous: _xx_copyarray = False _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif xx_ is not None: _xx_copyarray = True _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64)) _xx_np_tmp[:] = xx_ assert _xx_np_tmp.flags.contiguous _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _xx_copyarray = False _xx_tmp = None _y_minlength = self.getnumcon() if self.getnumcon() > 0 and y_ is not None and len(y_) != self.getnumcon(): raise ValueError("Array argument y is not long enough: Is %d, expected %d" % (len(y_),self.getnumcon())) if isinstance(y_,numpy.ndarray) and not y_.flags.writeable: raise ValueError("Argument y must be writable") if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous: _y_copyarray = False _y_tmp = 
ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif y_ is not None: _y_copyarray = True _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64)) _y_np_tmp[:] = y_ assert _y_np_tmp.flags.contiguous _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _y_copyarray = False _y_tmp = None _slc_minlength = self.getnumcon() if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon(): raise ValueError("Array argument slc is not long enough: Is %d, expected %d" % (len(slc_),self.getnumcon())) if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable: raise ValueError("Argument slc must be writable") if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous: _slc_copyarray = False _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif slc_ is not None: _slc_copyarray = True _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64)) _slc_np_tmp[:] = slc_ assert _slc_np_tmp.flags.contiguous _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _slc_copyarray = False _slc_tmp = None _suc_minlength = self.getnumcon() if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon(): raise ValueError("Array argument suc is not long enough: Is %d, expected %d" % (len(suc_),self.getnumcon())) if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable: raise ValueError("Argument suc must be writable") if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous: _suc_copyarray = False _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif suc_ is not None: _suc_copyarray = True _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64)) _suc_np_tmp[:] = suc_ assert _suc_np_tmp.flags.contiguous _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _suc_copyarray = False _suc_tmp = None _slx_minlength = self.getnumvar() if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar(): raise ValueError("Array argument slx is not long enough: Is %d, expected %d" % (len(slx_),self.getnumvar())) if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable: raise ValueError("Argument slx must be writable") if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous: _slx_copyarray = False _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif slx_ is not None: _slx_copyarray = True _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64)) _slx_np_tmp[:] = slx_ assert _slx_np_tmp.flags.contiguous _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _slx_copyarray = False _slx_tmp = None _sux_minlength = self.getnumvar() if self.getnumvar() > 0 and sux_ is not None and len(sux_) != self.getnumvar(): raise ValueError("Array argument sux is not long enough: Is %d, expected %d" % (len(sux_),self.getnumvar())) if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable: raise ValueError("Argument sux must be writable") if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous: _sux_copyarray = False _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif sux_ is not None: _sux_copyarray = True _sux_np_tmp = 
numpy.zeros(len(sux_),numpy.dtype(numpy.float64)) _sux_np_tmp[:] = sux_ assert _sux_np_tmp.flags.contiguous _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _sux_copyarray = False _sux_tmp = None _snx_minlength = self.getnumvar() if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar(): raise ValueError("Array argument snx is not long enough: Is %d, expected %d" % (len(snx_),self.getnumvar())) if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable: raise ValueError("Argument snx must be writable") if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous: _snx_copyarray = False _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) elif snx_ is not None: _snx_copyarray = True _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64)) _snx_np_tmp[:] = snx_ assert _snx_np_tmp.flags.contiguous _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double)) else: _snx_copyarray = False _snx_tmp = None res = __library__.MSK_XX_getsolution(self.__nativep,whichsol_,ctypes.byref(prosta_),ctypes.byref(solsta_),_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _prosta_return_value = prosta(prosta_.value) _solsta_return_value = solsta(solsta_.value) if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ] if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ] if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ] if _xc_copyarray: xc_[:] = _xc_np_tmp if _xx_copyarray: xx_[:] = _xx_np_tmp if _y_copyarray: y_[:] = _y_np_tmp if _slc_copyarray: slc_[:] = _slc_np_tmp if _suc_copyarray: suc_[:] = _suc_np_tmp if _slx_copyarray: slx_[:] = _slx_np_tmp if _sux_copyarray: sux_[:] = _sux_np_tmp if _snx_copyarray: snx_[:] = _snx_np_tmp return (_prosta_return_value,_solsta_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n _copyback_skn = False\n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n _copyback_skn = True\n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n _copyback_skn = True\n if skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn has wrong length\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc has wrong length\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y has wrong length\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = 
memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n _copyback_sux = False\n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n _copyback_sux = True\n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n _copyback_sux = True\n if sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux has wrong length\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res,resargs = self.__obj.getsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value,_solsta_return_value = resargs\n if _copyback_snx:\n snx[:] = _tmparr_snx\n if _copyback_sux:\n sux[:] = _tmparr_sux\n if _copyback_slx:\n slx[:] = _tmparr_slx\n if _copyback_suc:\n suc[:] = _tmparr_suc\n if _copyback_slc:\n slc[:] = _tmparr_slc\n if _copyback_y:\n y[:] = _tmparr_y\n if _copyback_xx:\n xx[:] = _tmparr_xx\n if _copyback_xc:\n xc[:] = _tmparr_xc\n if _copyback_skn:\n for __tmp_var_2 in range(len(skn_)): skn[__tmp_var_2] = stakey(_tmparr_skn[__tmp_var_2])\n if _copyback_skx:\n for __tmp_var_1 in range(len(skx_)): skx[__tmp_var_1] = stakey(_tmparr_skx[__tmp_var_1])\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])\n _solsta_return_value = solsta(_solsta_return_value)\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value,_solsta_return_value", "def putsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n 
except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n \n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n \n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n \n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n \n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n \n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n \n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n \n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n \n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n \n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n \n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n \n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n \n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n \n else:\n if snx_.format != \"d\":\n snx_ = 
memoryview(array.array(\"d\",snx))\n \n res = self.__obj.putsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))(*skx_)\n else:\n _skx_tmp = None\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))(*skn_)\n else:\n _skn_tmp = None\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = 
numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_putsolution(self.__nativep,whichsol_,_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def getskc(self,whichsol_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskc(whichsol_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getslc(self,whichsol_,slc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if slc is None: raise TypeError(\"Invalid type for argument slc\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = 
memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n res = self.__obj.getslc(whichsol_,slc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_slc:\n slc[:] = _tmparr_slc", "def getskx(self,whichsol_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskx(whichsol_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getslc(self,whichsol_,slc_):\n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if slc_ is None:\n raise ValueError(\"Argument slc may not be None\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n res = __library__.MSK_XX_getslc(self.__nativep,whichsol_,_slc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp", "def getskc(self,whichsol_,skc_):\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskc(self.__nativep,whichsol_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getsolsta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolsta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = resargs\n 
_solsta_return_value = solsta(_solsta_return_value)\n return _solsta_return_value", "def putconsolutioni(self,i_,whichsol_,sk_,x_,sl_,su_):\n res = __library__.MSK_XX_putconsolutioni(self.__nativep,i_,whichsol_,sk_,x_,sl_,su_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsolsta(self,whichsol_):\n solsta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getsolsta(self.__nativep,whichsol_,ctypes.byref(solsta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = solsta(solsta_.value)\n return (_solsta_return_value)", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = __library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n _dviolcone_return_value = dviolcone_\n return (_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def get_sol(self):", "def getskn(self,whichsol_,skn_):\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n res = __library__.MSK_XX_getskn(self.__nativep,whichsol_,_skn_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]", "def getskx(self,whichsol_,skx_):\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if 
skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskx(self.__nativep,whichsol_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def find_solution(self):\r\n for solution in self.solutions:\r\n if self.fitting_function.is_legal_solution(solution):\r\n return solution\r\n return None", "def solutiondef(self,whichsol_):\n isdef_ = ctypes.c_int32()\n res = __library__.MSK_XX_solutiondef(self.__nativep,whichsol_,ctypes.byref(isdef_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n isdef_ = isdef_.value\n _isdef_return_value = isdef_\n return (_isdef_return_value)", "def getinfeasiblesubproblem(self,whichsol_):\n inftask_ = ctypes.c_void_p()\n res = __library__.MSK_XX_getinfeasiblesubproblem(self.__nativep,whichsol_,ctypes.byref(inftask_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _inftask_return_value = Task(nativep = inftask_)\n return (_inftask_return_value)", "def get_solution(self):\r\n return self.solution", "def get_solution(self):\n return self._generate_solution()", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def putsolutioni(self,accmode_,i_,whichsol_,sk_,x_,sl_,su_,sn_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if not isinstance(sk_,stakey): raise TypeError(\"Argument sk has wrong type\")\n res = self.__obj.putsolutioni(accmode_,i_,whichsol_,sk_,x_,sl_,su_,sn_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def analyzesolution(self,whichstream_,whichsol_):\n res = __library__.MSK_XX_analyzesolution(self.__nativep,whichstream_,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def iterative_solver_list(self, which, rhs, *args):\n if which == 'bicg':\n return spla.bicg(self.sp_matrix, rhs, args)\n elif which == \"cg\":\n return spla.cg(self.sp_matrix, rhs, args)\n elif which == \"bicgstab\":\n return spla.bicgstab(self.sp_matrix, rhs, args)\n elif which == \"cgs\":\n return spla.cgs(self.sp_matrix, rhs, args)\n elif which == \"gmres\":\n return spla.gmres(self.sp_matrix, rhs, args)\n elif which == \"lgmres\":\n return spla.lgmres(self.sp_matrix, rhs, args)\n elif which == \"qmr\":\n return spla.qmr(self.sp_matrix, rhs, args)\n else:\n 
raise NotImplementedError(\"this solver is unknown\")", "def getslx(self,whichsol_,slx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if slx is None: raise TypeError(\"Invalid type for argument slx\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n res = self.__obj.getslx(whichsol_,slx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_slx:\n slx[:] = _tmparr_slx", "def find_solution(self):\n print(\"\\nFinding ICTS Solution...\")\n ######### Fill in the ICTS Algorithm here #########\n result = self.stat_tracker.time(\"time\", lambda: self.bfs())\n if result == -1:\n self.stat_tracker.stats['time'] = -1\n return []\n self.stat_tracker.write_stats_to_file(self.stat_tracker.get_results_file_name())\n return result\n ###################################################", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! 
WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res" ]
[ "0.8545925", "0.7747821", "0.7333495", "0.7199789", "0.68386203", "0.66661495", "0.6665564", "0.66491985", "0.6588644", "0.6554875", "0.65271825", "0.6473194", "0.6416208", "0.632671", "0.63102174", "0.62922955", "0.62530744", "0.6231695", "0.62263197", "0.61406004", "0.6117046", "0.61076367", "0.60905105", "0.608615", "0.603616", "0.5995865", "0.596758", "0.59629387", "0.59485203", "0.593994" ]
0.8528952
1
Obtains the solution status. getsolsta(self,whichsol_)
def getsolsta(self,whichsol_): solsta_ = ctypes.c_int32() res = __library__.MSK_XX_getsolsta(self.__nativep,whichsol_,ctypes.byref(solsta_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _solsta_return_value = solsta(solsta_.value) return (_solsta_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getsolsta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolsta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = resargs\n _solsta_return_value = solsta(_solsta_return_value)\n return _solsta_return_value", "def getprosta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprosta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = resargs\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def getprosta(self,whichsol_):\n prosta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getprosta(self.__nativep,whichsol_,ctypes.byref(prosta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n return (_prosta_return_value)", "def solutiondef(self,whichsol_):\n isdef_ = ctypes.c_int32()\n res = __library__.MSK_XX_solutiondef(self.__nativep,whichsol_,ctypes.byref(isdef_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n isdef_ = isdef_.value\n _isdef_return_value = isdef_\n return (_isdef_return_value)", "def get_sol(self):", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = __library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n 
_dviolcone_return_value = dviolcone_\n return (_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def get_solution(self):\r\n return self.solution", "def state(self):\n\n return self.solenoid.get()", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def find_solution(self):\n print(\"\\nFinding ICTS Solution...\")\n ######### Fill in the ICTS Algorithm here #########\n result = self.stat_tracker.time(\"time\", lambda: self.bfs())\n if result == -1:\n self.stat_tracker.stats['time'] = -1\n return []\n self.stat_tracker.write_stats_to_file(self.stat_tracker.get_results_file_name())\n return result\n ###################################################", "def analyzesolution(self,whichstream_,whichsol_):\n res = __library__.MSK_XX_analyzesolution(self.__nativep,whichstream_,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solution_state(self):\n return self._solution_state", "def solutiondef(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.solutiondef(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _isdef_return_value = resargs\n return _isdef_return_value", "def getinfeasiblesubproblem(self,whichsol_):\n inftask_ = ctypes.c_void_p()\n res = __library__.MSK_XX_getinfeasiblesubproblem(self.__nativep,whichsol_,ctypes.byref(inftask_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _inftask_return_value = Task(nativep = inftask_)\n return (_inftask_return_value)", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def getsuc(self,whichsol_,suc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if suc is None: raise TypeError(\"Invalid type for argument suc\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = 
memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n res = self.__obj.getsuc(whichsol_,suc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_suc:\n suc[:] = _tmparr_suc", "def find_solution(self):\r\n for solution in self.solutions:\r\n if self.fitting_function.is_legal_solution(solution):\r\n return solution\r\n return None", "def solved(self):\r\n return self.puzzle.solved", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def readsolution(self,whichsol_,filename_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.readsolution(whichsol_,filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def site(self, code, soln):\n###############################################################################\n return(self.estimates[code, soln])", "def updatesolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.updatesolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n prosta_ = ctypes.c_int32()\n solsta_ = ctypes.c_int32()\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n _xc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),self.getnumcon()))\n if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable:\n raise ValueError(\"Argument xc must be writable\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = 
ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n _xx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),self.getnumvar()))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n _y_minlength = self.getnumcon()\n if self.getnumcon() > 0 and y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),self.getnumcon()))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n _suc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),self.getnumcon()))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = 
ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n _slx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx is not long enough: Is %d, expected %d\" % (len(slx_),self.getnumvar()))\n if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable:\n raise ValueError(\"Argument slx must be writable\")\n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n _sux_minlength = self.getnumvar()\n if self.getnumvar() > 0 and sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux is not long enough: Is %d, expected %d\" % (len(sux_),self.getnumvar()))\n if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable:\n raise ValueError(\"Argument sux must be writable\")\n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsolution(self.__nativep,whichsol_,ctypes.byref(prosta_),ctypes.byref(solsta_),_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n _solsta_return_value = solsta(solsta_.value)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]\n if skx_ is not None: 
skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]\n if _xc_copyarray:\n xc_[:] = _xc_np_tmp\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp\n if _y_copyarray:\n y_[:] = _y_np_tmp\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp\n if _slx_copyarray:\n slx_[:] = _slx_np_tmp\n if _sux_copyarray:\n sux_[:] = _sux_np_tmp\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp\n return (_prosta_return_value,_solsta_return_value)", "def fetch(self):\n return self.sol", "def update_current_sol_and_cost(self,sol=None):\n\n # Update current sol if argument given\n if sol is not None:\n self.current_sol = sol\n \n # Update residual and cost\n try:\n self.residual = self.sketch_reweighted - self.sketch_of_solution(self.current_sol)\n self.current_sol_cost = np.linalg.norm(self.residual)\n except AttributeError: # We are here if self.current_sol does not exist yet\n self.current_sol, self.residual = None, self.sketch_reweighted\n self.current_sol_cost = np.inf", "def getsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n _copyback_skn = False\n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n _copyback_skn = True\n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n _copyback_skn = True\n if skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn has wrong length\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc has wrong length\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = 
memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y has wrong length\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n _copyback_sux = False\n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n _copyback_sux = True\n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n _copyback_sux = True\n if sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux has wrong length\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res,resargs = self.__obj.getsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value,_solsta_return_value = 
resargs\n if _copyback_snx:\n snx[:] = _tmparr_snx\n if _copyback_sux:\n sux[:] = _tmparr_sux\n if _copyback_slx:\n slx[:] = _tmparr_slx\n if _copyback_suc:\n suc[:] = _tmparr_suc\n if _copyback_slc:\n slc[:] = _tmparr_slc\n if _copyback_y:\n y[:] = _tmparr_y\n if _copyback_xx:\n xx[:] = _tmparr_xx\n if _copyback_xc:\n xc[:] = _tmparr_xc\n if _copyback_skn:\n for __tmp_var_2 in range(len(skn_)): skn[__tmp_var_2] = stakey(_tmparr_skn[__tmp_var_2])\n if _copyback_skx:\n for __tmp_var_1 in range(len(skx_)): skx[__tmp_var_1] = stakey(_tmparr_skx[__tmp_var_1])\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])\n _solsta_return_value = solsta(_solsta_return_value)\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value,_solsta_return_value", "def get_solution(self):\n return self._generate_solution()" ]
[ "0.8332325", "0.67369443", "0.6637133", "0.65710145", "0.63403517", "0.6337015", "0.6313975", "0.6290875", "0.6236583", "0.61838657", "0.6070488", "0.6067338", "0.6028722", "0.60181767", "0.59323597", "0.5913741", "0.58426934", "0.5825443", "0.57618636", "0.56496173", "0.56484896", "0.5608392", "0.5606637", "0.55952346", "0.5591939", "0.5590343", "0.5557385", "0.5545225", "0.5536571", "0.5533096" ]
0.79787683
1
Obtains the problem status. getprosta(self,whichsol_)
def getprosta(self,whichsol_): prosta_ = ctypes.c_int32() res = __library__.MSK_XX_getprosta(self.__nativep,whichsol_,ctypes.byref(prosta_)) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) _prosta_return_value = prosta(prosta_.value) return (_prosta_return_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getprosta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprosta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = resargs\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value", "def getsolsta(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolsta(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = resargs\n _solsta_return_value = solsta(_solsta_return_value)\n return _solsta_return_value", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def getsolsta(self,whichsol_):\n solsta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getsolsta(self.__nativep,whichsol_,ctypes.byref(solsta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = solsta(solsta_.value)\n return (_solsta_return_value)", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = __library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n _dviolcone_return_value = dviolcone_\n return 
(_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def getsuc(self,whichsol_,suc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if suc is None: raise TypeError(\"Invalid type for argument suc\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n res = self.__obj.getsuc(whichsol_,suc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_suc:\n suc[:] = _tmparr_suc", "def getinfeasiblesubproblem(self,whichsol_):\n inftask_ = ctypes.c_void_p()\n res = __library__.MSK_XX_getinfeasiblesubproblem(self.__nativep,whichsol_,ctypes.byref(inftask_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _inftask_return_value = Task(nativep = inftask_)\n return (_inftask_return_value)", "def actualSolve(self, lp):\n\t\t\tlp.status = pulpCOIN.solve(lp.objective, lp.constraints, lp.sense, \n\t\t\t\tself.msg, self.mip, self.presolve, self.dual, self.crash, self.scale,\n\t\t\t\tself.rounding, self.integerPresolve, self.strong, self.cuts)\n\t\t\treturn lp.status", "def state(self):\n\n return self.solenoid.get()", "def actualSolve(self, lp):\n\t\t\tlp.status = pulpCPLEX.solve(lp.objective, lp.constraints, lp.sense, self.msg,\n\t\t\t\tself.mip, self.timeLimit)\n\t\t\treturn lp.status", "def get_working_status(self):\n #TODO: fix some issue on restarting and so on about current status\n return self.working_map[self.get_status()]", "def getprimalobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getprimalobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _primalobj_return_value = resargs\n return _primalobj_return_value", "def get_sol(self):", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def solved(self):\r\n return self.puzzle.solved", "def get_solution(self):\r\n return self.solution", 
"def actualSolve(self, lp):\n\t\t\tlp.status = pulpGLPK.solve(lp.objective, lp.constraints, lp.sense, self.msg,\n\t\t\t\tself.mip, self.presolve)\n\t\t\treturn lp.status", "def solution_state(self):\n return self._solution_state", "def get_solution(self):\n return self.P_plot[-1]", "def getprimalobj(self,whichsol_):\n primalobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getprimalobj(self.__nativep,whichsol_,ctypes.byref(primalobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n primalobj_ = primalobj_.value\n _primalobj_return_value = primalobj_\n return (_primalobj_return_value)", "def sketch_of_solution(self,sol=None):\n raise NotImplementedError", "def get_pir_status(self):\n response = self.parent.pir.status()\n return response[0]", "def getsuc(self,whichsol_,suc_):\n _suc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),self.getnumcon()))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if suc_ is None:\n raise ValueError(\"Argument suc may not be None\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n res = __library__.MSK_XX_getsuc(self.__nativep,whichsol_,_suc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp", "def script_status(script_id):\n status = _u2i(_pigpio_command(_control, _PI_CMD_PROCP, script_id, 0))\n if status >= 0:\n param = struct.unpack('IIIIIIIIII', _control.recv(40))\n return status, param\n return status, ()", "def problem(self):\n return self['problem']", "def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def priority(self):\n return self._pri" ]
[ "0.8061198", "0.6539571", "0.63518715", "0.62043124", "0.588598", "0.5827498", "0.5815437", "0.5792493", "0.568136", "0.5551725", "0.55403644", "0.5519703", "0.55097705", "0.5475015", "0.5456512", "0.5454256", "0.5443278", "0.5402819", "0.53696334", "0.5313365", "0.5283332", "0.52545816", "0.52498674", "0.5235115", "0.5222368", "0.52042437", "0.5177077", "0.5171893", "0.51399356", "0.5127817" ]
0.76344854
1
Obtains the status keys for the constraints. getskc(self,whichsol_,skc_)
def getskc(self,whichsol_,skc_): _skc_minlength = self.getnumcon() if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon(): raise ValueError("Array argument skc is not long enough: Is %d, expected %d" % (len(skc_),self.getnumcon())) if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable: raise ValueError("Argument skc must be writable") if skc_ is not None: _skc_tmp = (ctypes.c_int32 * len(skc_))() else: _skc_tmp = None res = __library__.MSK_XX_getskc(self.__nativep,whichsol_,_skc_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getskc(self,whichsol_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskc(whichsol_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n prosta_ = ctypes.c_int32()\n solsta_ = ctypes.c_int32()\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = 
(ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n _xc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),self.getnumcon()))\n if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable:\n raise ValueError(\"Argument xc must be writable\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n _xx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),self.getnumvar()))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n _y_minlength = self.getnumcon()\n if self.getnumcon() > 0 and y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),self.getnumcon()))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n 
_slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n _suc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),self.getnumcon()))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n _slx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx is not long enough: Is %d, expected %d\" % (len(slx_),self.getnumvar()))\n if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable:\n raise ValueError(\"Argument slx must be writable\")\n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n _sux_minlength = self.getnumvar()\n if self.getnumvar() > 0 and sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux is not long enough: Is %d, expected %d\" % (len(sux_),self.getnumvar()))\n if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable:\n raise ValueError(\"Argument sux must be writable\")\n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and 
snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsolution(self.__nativep,whichsol_,ctypes.byref(prosta_),ctypes.byref(solsta_),_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n _solsta_return_value = solsta(solsta_.value)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]\n if _xc_copyarray:\n xc_[:] = _xc_np_tmp\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp\n if _y_copyarray:\n y_[:] = _y_np_tmp\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp\n if _slx_copyarray:\n slx_[:] = _slx_np_tmp\n if _sux_copyarray:\n sux_[:] = _sux_np_tmp\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp\n return (_prosta_return_value,_solsta_return_value)", "def getskn(self,whichsol_,skn_):\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n res = __library__.MSK_XX_getskn(self.__nativep,whichsol_,_skn_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]", "def getskx(self,whichsol_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskx(whichsol_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n 
if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n _copyback_skn = False\n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n _copyback_skn = True\n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n _copyback_skn = True\n if skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn has wrong length\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc has wrong length\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y has wrong length\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if 
suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n _copyback_sux = False\n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n _copyback_sux = True\n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n _copyback_sux = True\n if sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux has wrong length\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res,resargs = self.__obj.getsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value,_solsta_return_value = resargs\n if _copyback_snx:\n snx[:] = _tmparr_snx\n if _copyback_sux:\n sux[:] = _tmparr_sux\n if _copyback_slx:\n slx[:] = _tmparr_slx\n if _copyback_suc:\n suc[:] = _tmparr_suc\n if _copyback_slc:\n slc[:] = _tmparr_slc\n if _copyback_y:\n y[:] = _tmparr_y\n if _copyback_xx:\n xx[:] = _tmparr_xx\n if _copyback_xc:\n xc[:] = _tmparr_xc\n if _copyback_skn:\n for __tmp_var_2 in range(len(skn_)): skn[__tmp_var_2] = stakey(_tmparr_skn[__tmp_var_2])\n if _copyback_skx:\n for __tmp_var_1 in range(len(skx_)): skx[__tmp_var_1] = stakey(_tmparr_skx[__tmp_var_1])\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])\n _solsta_return_value = solsta(_solsta_return_value)\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value,_solsta_return_value", "def getskx(self,whichsol_,skx_):\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskx(self.__nativep,whichsol_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def pk_cs(self, 
snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n # print(self.pk.shape)\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex)\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True)\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n objective = cp.Minimize(cp.pnorm(x_cvx, p=1))\n constraints = [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon]#[H*x == pm]\n # Create the problem and solve\n problem = cp.Problem(objective, constraints)\n problem.solve(solver=cp.SCS, verbose=True) \n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()\n return self.pk", "def getslc(self,whichsol_,slc_):\n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if slc_ is None:\n raise ValueError(\"Argument slc may not be None\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n res = __library__.MSK_XX_getslc(self.__nativep,whichsol_,_slc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp", "def putskc(self,whichsol_,skc_):\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if skc_ is None:\n raise ValueError(\"Argument skc cannot be None\")\n if skc_ is None:\n raise ValueError(\"Argument skc may not be None\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_putskc(self.__nativep,whichsol_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getslc(self,whichsol_,slc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if slc is None: raise TypeError(\"Invalid type for argument slc\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n 
raise ValueError(\"Array argument slc has wrong length\")\n res = self.__obj.getslc(whichsol_,slc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_slc:\n slc[:] = _tmparr_slc", "def putskc(self,whichsol_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None: raise TypeError(\"Invalid type for argument skc\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.putskc(whichsol_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def _spc(self, spcID):\r\n if spcID in self.add_constraints:\r\n return self.add_constraints[spcID]\r\n return self.constraints[spcID]", "def getclskey(cls, tmpcls, op, slot):\n return cls.getClsStagePri(tmpcls, op, slot)", "def pk_constrained(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex) # cvxpy does not accept floats, apparently\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True) # create x variable\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n problem = cp.Problem(cp.Minimize(cp.norm2(x_cvx)**2),\n [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon])\n problem.solve(solver=cp.SCS, verbose=False)\n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def putskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_putskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise 
TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n \n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n \n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n \n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n \n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n \n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n \n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n \n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n \n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n \n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n \n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n \n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n \n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong 
type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n \n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n \n res = self.__obj.putsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getskxslice(self,whichsol_,first_,last_,skx_):\n _skx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),((last_) - (first_))))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskxslice(self.__nativep,whichsol_,first_,last_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def ndxLCS(hh, vv):\n B=LCS.getB(hh,vv)\n return LCS.backtrack2(B);", "def csc():\n endcaps = [1,2]\n disks = [1,2,3,4]\n rings = {1:[1,2,3], # different rings for different disks\n 2:[1,2], \n 3:[1,2],\n 4:[1,2]}\n\n csc_info = {\n \"endcaps\":endcaps,\n \"disks\": disks,\n \"rings\": rings}\n\n return csc_info", "def getskxslice(self,whichsol_,first_,last_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskxslice(whichsol_,first_,last_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getsolsta(self,whichsol_):\n solsta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getsolsta(self.__nativep,whichsol_,ctypes.byref(solsta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = solsta(solsta_.value)\n return (_solsta_return_value)", "def putskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.putskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def vcac_getvm_sr_status(self, serv_req):\n self.reqdata=serv_req\n #Keep requesting the status of the deployment and break when the process is no longer 
\"IN_PROGRESS\"\n flag=1\n mailer=0\n s_once=1\t\t\n while flag:\n mailer += 1\n start = time.time()\t\t\n #sleep(10)\n try:\n jfile=self.data['rundir'] + '/' + self.reqdata + '.json'\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd = \"cd %s && ./cloudclient.sh vra request detail --id %s \" \\\n \"--format JSON --export %s\" % \\\n ( vrapath, self.reqdata, jfile )\n logging.info(\"- vcac cloudclient monitor \" \\\n \"request id \" + self.reqdata + \" status\")\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_sr_status: %s\" % str(e)\n sys.exit(1)\n\t\t\t\t\n\t\t\t# check file exist and not empty\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n with open(jfile) as data_file:\n\t\t\t\t requestData = json.load(data_file)\n if requestData['state'] == \"SUCCESSFUL\":\n flag=0\n self.gtintval=mailer\n tdate=str(datetime.timedelta(seconds=self.gtintval))\n print \"\\n\"\n print \"SR Reached: %s (HH:MM:SS)\\n\" % tdate\n print \"SR [ %s ] done, status changed from \" \\\n \"IN_PROGRESS to %s\\n\" % \\\n ( requestData['requestNumber'], requestData['state'])\n print \"\\n\"\n break\n\n #Work out of the task failed and if not set \n #the state variable\n if requestData['state'] == \"PROVIDER_FAILED\" or \\\n requestData['state'] == \"FAILED\":\n state = requestData['state']\n reason = requestData['requestCompletion']['completionDetails']\n print \"- vcac cloudclient ERROR: %s\" % state\n ops=\"\"\n self.update_helpdesk(requestData)\n # Need to add some valuable failed data and do not exit.\n #sys.exit(\" - CLOUDCLIENT ERROR: \" + state)\n return requestData\n\n end = time.time()\n g=str(datetime.timedelta(seconds=(end - start)))\n parts=g.split(\":\")\n seconds = int(parts[0])*(60*60) + \\\n int(parts[1])*60 + \\\n float(parts[2])\n time.sleep(60.0)\n mailer = mailer + seconds\n mailer = mailer + 60\n logging.debug('mailer count %s' % mailer)\n if int(mailer) >= 7200 and s_once:\n print \"\\n\"\n print \"%s\\n\" % msgtext\n try:\n print \"Sending notification to IT for \", \\\n \"service request: %s\\n\" % requestData['requestNumber']\n print \"\\n\"\n self.ops='gen'\n self.notify_user(requestData, self.ops)\n logging.info('Notification send ......')\n except:\n pass\n s_once=0\n continue\n else:\n logging.info('No need to send notification ......')\n\n logging.info(\"- vcac cloudclient request \" \\\n \"status : %s\" % ( requestData['state'] ))\n \n return requestData", "def putconsolutioni(self,i_,whichsol_,sk_,x_,sl_,su_):\n res = __library__.MSK_XX_putconsolutioni(self.__nativep,i_,whichsol_,sk_,x_,sl_,su_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getConstraint(self):\n return self.gk, self.g_mink, self.g_maxk", "def active_ssms(self, k):\n\n if k == self.active_ssm_cache1_k:\n return self.active_ssm_cache1_v\n elif k == self.active_ssm_cache2_k:\n return self.active_ssm_cache2_v\n\n i = np.searchsorted(self.changepoints, k, side=\"right\")-1\n active_ssms = self.active_sets[i]\n\n self.active_ssm_cache2_k = self.active_ssm_cache1_k\n self.active_ssm_cache2_v = self.active_ssm_cache1_v\n self.active_ssm_cache1_k = k\n self.active_ssm_cache1_v = active_ssms\n return active_ssms", "def get_next_conf_keys(self):\n C_List = []\n for key in self.Poss_Tree:\n key_c = int(str(key)[-1])\n for choice in self.Poss_Tree[key]:\n if choice == key_c:\n C_List.append(int(construct_pass(key, choice)))\n return C_List" ]
[ "0.7628529", "0.64641875", "0.6297897", "0.6218528", "0.6176679", "0.60891193", "0.60776615", "0.6005346", "0.58437234", "0.5784157", "0.5729363", "0.56601924", "0.5591095", "0.5427357", "0.54007435", "0.5310639", "0.5267366", "0.52485293", "0.51785713", "0.5076993", "0.50753546", "0.50426024", "0.50356114", "0.5030415", "0.49771422", "0.49602044", "0.49349225", "0.4872676", "0.48659", "0.4860948" ]
0.7737572
0
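A minimal usage sketch for the `getskc` call documented in the record above, assuming a MOSEK environment and task that have already been populated with a problem and optimized (that setup is not part of the record); only the `getskc`/`getnumcon` signatures and the `soltype`/`stakey` enums are taken from the record's document text.

```python
# Hypothetical usage sketch (not part of the dataset record).
# Problem data loading is assumed and elided; only the status-key query
# shown in the record's document is exercised here.
import mosek

with mosek.Env() as env:
    with env.Task(0, 0) as task:
        # ... problem data would be appended here (assumed) ...
        task.optimize()

        # getskc expects one writable status-key slot per constraint.
        skc = [mosek.stakey.unk] * task.getnumcon()
        task.getskc(mosek.soltype.bas, skc)  # fills skc with constraint status keys

        for i, sk in enumerate(skc):
            print(f"constraint {i}: {sk}")
```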
Obtains the status keys for the scalar variables. getskx(self,whichsol_,skx_)
def getskx(self,whichsol_,skx_): _skx_minlength = self.getnumvar() if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar(): raise ValueError("Array argument skx is not long enough: Is %d, expected %d" % (len(skx_),self.getnumvar())) if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable: raise ValueError("Argument skx must be writable") if skx_ is not None: _skx_tmp = (ctypes.c_int32 * len(skx_))() else: _skx_tmp = None res = __library__.MSK_XX_getskx(self.__nativep,whichsol_,_skx_tmp) if res != 0: _,msg = self.__getlasterror(res) raise Error(rescode(res),msg) if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getskx(self,whichsol_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskx(whichsol_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getsnx(self,whichsol_,snx_):\n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if snx_ is None:\n raise ValueError(\"Argument snx may not be None\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsnx(self.__nativep,whichsol_,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp", "def getskxslice(self,whichsol_,first_,last_,skx_):\n _skx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),((last_) - (first_))))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskxslice(self.__nativep,whichsol_,first_,last_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def getskc(self,whichsol_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskc(whichsol_,skc_)\n if res != 0:\n 
result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getskxslice(self,whichsol_,first_,last_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskxslice(whichsol_,first_,last_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n _copyback_skn = False\n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n _copyback_skn = True\n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n _copyback_skn = True\n if skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn has wrong length\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc has wrong length\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong 
type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y has wrong length\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n _copyback_sux = False\n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n _copyback_sux = True\n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n _copyback_sux = True\n if sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux has wrong length\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res,resargs = self.__obj.getsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n 
_prosta_return_value,_solsta_return_value = resargs\n if _copyback_snx:\n snx[:] = _tmparr_snx\n if _copyback_sux:\n sux[:] = _tmparr_sux\n if _copyback_slx:\n slx[:] = _tmparr_slx\n if _copyback_suc:\n suc[:] = _tmparr_suc\n if _copyback_slc:\n slc[:] = _tmparr_slc\n if _copyback_y:\n y[:] = _tmparr_y\n if _copyback_xx:\n xx[:] = _tmparr_xx\n if _copyback_xc:\n xc[:] = _tmparr_xc\n if _copyback_skn:\n for __tmp_var_2 in range(len(skn_)): skn[__tmp_var_2] = stakey(_tmparr_skn[__tmp_var_2])\n if _copyback_skx:\n for __tmp_var_1 in range(len(skx_)): skx[__tmp_var_1] = stakey(_tmparr_skx[__tmp_var_1])\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])\n _solsta_return_value = solsta(_solsta_return_value)\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value,_solsta_return_value", "def getsnx(self,whichsol_,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if snx is None: raise TypeError(\"Invalid type for argument snx\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res = self.__obj.getsnx(whichsol_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_snx:\n snx[:] = _tmparr_snx", "def getskc(self,whichsol_,skc_):\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskc(self.__nativep,whichsol_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def putskx(self,whichsol_,skx_):\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if skx_ is None:\n raise ValueError(\"Argument skx cannot be None\")\n if skx_ is None:\n raise ValueError(\"Argument skx may not be None\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))(*skx_)\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_putskx(self.__nativep,whichsol_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getskn(self,whichsol_,skn_):\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n 
_skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n res = __library__.MSK_XX_getskn(self.__nativep,whichsol_,_skn_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]", "def getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n prosta_ = ctypes.c_int32()\n solsta_ = ctypes.c_int32()\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n _xc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),self.getnumcon()))\n if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable:\n raise ValueError(\"Argument xc must be writable\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n _xx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),self.getnumvar()))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n _y_minlength = self.getnumcon()\n if 
self.getnumcon() > 0 and y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),self.getnumcon()))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n _suc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),self.getnumcon()))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n _slx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx is not long enough: Is %d, expected %d\" % (len(slx_),self.getnumvar()))\n if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable:\n raise ValueError(\"Argument slx must be writable\")\n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n _sux_minlength = self.getnumvar()\n if self.getnumvar() > 0 and sux_ 
is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux is not long enough: Is %d, expected %d\" % (len(sux_),self.getnumvar()))\n if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable:\n raise ValueError(\"Argument sux must be writable\")\n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsolution(self.__nativep,whichsol_,ctypes.byref(prosta_),ctypes.byref(solsta_),_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n _solsta_return_value = solsta(solsta_.value)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]\n if _xc_copyarray:\n xc_[:] = _xc_np_tmp\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp\n if _y_copyarray:\n y_[:] = _y_np_tmp\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp\n if _slx_copyarray:\n slx_[:] = _slx_np_tmp\n if _sux_copyarray:\n sux_[:] = _sux_np_tmp\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp\n return (_prosta_return_value,_solsta_return_value)", "def putskx(self,whichsol_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skx is None: raise TypeError(\"Invalid type for argument skx\")\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.putskx(whichsol_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getslx(self,whichsol_,slx_):\n 
_slx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx is not long enough: Is %d, expected %d\" % (len(slx_),self.getnumvar()))\n if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable:\n raise ValueError(\"Argument slx must be writable\")\n if slx_ is None:\n raise ValueError(\"Argument slx may not be None\")\n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n res = __library__.MSK_XX_getslx(self.__nativep,whichsol_,_slx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _slx_copyarray:\n slx_[:] = _slx_np_tmp", "def getslx(self,whichsol_,slx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if slx is None: raise TypeError(\"Invalid type for argument slx\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n res = self.__obj.getslx(whichsol_,slx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_slx:\n slx[:] = _tmparr_slx", "def putsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n \n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n \n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n 
if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n \n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n \n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n \n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n \n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n \n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n \n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n \n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n \n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n \n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n \n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n \n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n \n res = self.__obj.putsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z", "def __getitem__(self, key):\n if key>=len(self.trained_rqrmi):\n raise KeyError('Stage index invalid')\n return self.trained_rqrmi[key]", "def putskxslice(self,whichsol_,first_,last_,skx_):\n _skx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),((last_) - (first_))))\n if skx_ is None:\n raise ValueError(\"Argument skx cannot be None\")\n if skx_ is None:\n raise ValueError(\"Argument skx may 
not be None\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))(*skx_)\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_putskxslice(self.__nativep,whichsol_,first_,last_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def KS(self, using, dx=0.0001):\n pits = np.array(self.PIT(using=using,dx=dx))\n ks_result = skgof.ks_test(pits, stats.uniform())\n return ks_result.statistic, ks_result.pvalue", "def get_stig_x(self):\n raise NotImplementedError", "def putsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))(*skx_)\n else:\n _skx_tmp = None\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))(*skn_)\n else:\n _skn_tmp = None\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n if 
isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_putsolution(self.__nativep,whichsol_,_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsnxslice(self,whichsol_,first_,last_,snx_):\n _snx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and snx_ is not None and len(snx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),((last_) - (first_))))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsnxslice(self.__nativep,whichsol_,first_,last_,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp", "def getclskey(cls, tmpcls, op, slot):\n return cls.getClsStagePri(tmpcls, op, slot)", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n 
_sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def getskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def get_sres(self, x: np.ndarray) -> np.ndarray:\n sres = self(x, (1,), MODE_RES)\n return sres", "def getxx(self,whichsol_,xx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if xx is None: raise TypeError(\"Invalid type for argument xx\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n res = self.__obj.getxx(whichsol_,xx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_xx:\n xx[:] = _tmparr_xx", "def getSolRatioVarIndx( self, var ):\n \n self.updateAdb( )\n\n if var in self.solNames:\n return self.solNames[ var ]\n elif var in self.solNames.values():\n return var\n else:\n return -1", "def putskxslice(self,whichsol_,first_,last_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skx is None: raise TypeError(\"Invalid type for argument skx\")\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.putskxslice(whichsol_,first_,last_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsux(self,whichsol_,sux_):\n _sux_minlength = self.getnumvar()\n if self.getnumvar() > 0 and sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux is not long enough: Is %d, expected %d\" % (len(sux_),self.getnumvar()))\n if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable:\n raise ValueError(\"Argument sux must be writable\")\n if sux_ is None:\n raise 
ValueError(\"Argument sux may not be None\")\n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n res = __library__.MSK_XX_getsux(self.__nativep,whichsol_,_sux_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _sux_copyarray:\n sux_[:] = _sux_np_tmp" ]
[ "0.78065765", "0.63683814", "0.62353045", "0.6215446", "0.61820394", "0.61591494", "0.6133144", "0.61062807", "0.6047507", "0.59702194", "0.5943474", "0.5915708", "0.580595", "0.5664523", "0.5435939", "0.53950316", "0.5300313", "0.52181596", "0.51550823", "0.51532644", "0.5100311", "0.50950074", "0.5094258", "0.5086898", "0.5072478", "0.50652766", "0.5065119", "0.50562376", "0.50459206", "0.50246364" ]
0.78649473
0
Obtains the status keys for the conic constraints. getskn(self,whichsol_,skn_)
def getskn(self,whichsol_,skn_):
    _skn_minlength = self.getnumcone()
    if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():
        raise ValueError("Array argument skn is not long enough: Is %d, expected %d" % (len(skn_),self.getnumcone()))
    if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:
        raise ValueError("Argument skn must be writable")
    if skn_ is not None:
        _skn_tmp = (ctypes.c_int32 * len(skn_))()
    else:
        _skn_tmp = None
    res = __library__.MSK_XX_getskn(self.__nativep,whichsol_,_skn_tmp)
    if res != 0:
        _,msg = self.__getlasterror(res)
        raise Error(rescode(res),msg)
    if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getskc(self,whichsol_,skc_):\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskc(self.__nativep,whichsol_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getskc(self,whichsol_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskc(whichsol_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getskx(self,whichsol_,skx_):\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskx(self.__nativep,whichsol_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def getskx(self,whichsol_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskx(whichsol_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n prosta_ = ctypes.c_int32()\n solsta_ = ctypes.c_int32()\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % 
(len(skc_),self.getnumcon()))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n _skx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),self.getnumvar()))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n _skn_minlength = self.getnumcone()\n if self.getnumcone() > 0 and skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn is not long enough: Is %d, expected %d\" % (len(skn_),self.getnumcone()))\n if isinstance(skn_,numpy.ndarray) and not skn_.flags.writeable:\n raise ValueError(\"Argument skn must be writable\")\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))()\n else:\n _skn_tmp = None\n _xc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc is not long enough: Is %d, expected %d\" % (len(xc_),self.getnumcon()))\n if isinstance(xc_,numpy.ndarray) and not xc_.flags.writeable:\n raise ValueError(\"Argument xc must be writable\")\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n _xx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),self.getnumvar()))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n _y_minlength = self.getnumcon()\n if self.getnumcon() > 0 and y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),self.getnumcon()))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert 
_y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n _suc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc is not long enough: Is %d, expected %d\" % (len(suc_),self.getnumcon()))\n if isinstance(suc_,numpy.ndarray) and not suc_.flags.writeable:\n raise ValueError(\"Argument suc must be writable\")\n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n _slx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx is not long enough: Is %d, expected %d\" % (len(slx_),self.getnumvar()))\n if isinstance(slx_,numpy.ndarray) and not slx_.flags.writeable:\n raise ValueError(\"Argument slx must be writable\")\n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n _sux_minlength = self.getnumvar()\n if self.getnumvar() > 0 and sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux is not long enough: Is %d, expected %d\" % (len(sux_),self.getnumvar()))\n if isinstance(sux_,numpy.ndarray) and not sux_.flags.writeable:\n raise ValueError(\"Argument sux must be writable\")\n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n 
assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsolution(self.__nativep,whichsol_,ctypes.byref(prosta_),ctypes.byref(solsta_),_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n _solsta_return_value = solsta(solsta_.value)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]\n if skn_ is not None: skn_[:] = [ stakey(v) for v in _skn_tmp[0:len(skn_)] ]\n if _xc_copyarray:\n xc_[:] = _xc_np_tmp\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp\n if _y_copyarray:\n y_[:] = _y_np_tmp\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp\n if _suc_copyarray:\n suc_[:] = _suc_np_tmp\n if _slx_copyarray:\n slx_[:] = _slx_np_tmp\n if _sux_copyarray:\n sux_[:] = _sux_np_tmp\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp\n return (_prosta_return_value,_solsta_return_value)", "def getsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc has wrong length\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != self.getnumvar():\n raise ValueError(\"Array argument skx has wrong length\")\n _copyback_skn = False\n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = 
memoryview(_tmparr_skn)\n _copyback_skn = True\n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n _copyback_skn = True\n if skn_ is not None and len(skn_) != self.getnumcone():\n raise ValueError(\"Array argument skn has wrong length\")\n _copyback_xc = False\n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n _copyback_xc = True\n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n _copyback_xc = True\n if xc_ is not None and len(xc_) != self.getnumcon():\n raise ValueError(\"Array argument xc has wrong length\")\n _copyback_xx = False\n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n _copyback_xx = True\n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n _copyback_xx = True\n if xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx has wrong length\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if y_ is not None and len(y_) != self.getnumcon():\n raise ValueError(\"Array argument y has wrong length\")\n _copyback_slc = False\n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n _copyback_slc = True\n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n _copyback_slc = True\n if slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc has wrong length\")\n _copyback_suc = False\n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n _copyback_suc = True\n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n _copyback_suc = True\n if suc_ is not None and len(suc_) != self.getnumcon():\n raise ValueError(\"Array argument suc has wrong length\")\n _copyback_slx = False\n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n _copyback_slx = True\n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n _copyback_slx = True\n if slx_ is not None and len(slx_) != self.getnumvar():\n raise ValueError(\"Array argument slx has wrong length\")\n _copyback_sux = False\n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = 
memoryview(_tmparr_sux)\n _copyback_sux = True\n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n _copyback_sux = True\n if sux_ is not None and len(sux_) != self.getnumvar():\n raise ValueError(\"Array argument sux has wrong length\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res,resargs = self.__obj.getsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value,_solsta_return_value = resargs\n if _copyback_snx:\n snx[:] = _tmparr_snx\n if _copyback_sux:\n sux[:] = _tmparr_sux\n if _copyback_slx:\n slx[:] = _tmparr_slx\n if _copyback_suc:\n suc[:] = _tmparr_suc\n if _copyback_slc:\n slc[:] = _tmparr_slc\n if _copyback_y:\n y[:] = _tmparr_y\n if _copyback_xx:\n xx[:] = _tmparr_xx\n if _copyback_xc:\n xc[:] = _tmparr_xc\n if _copyback_skn:\n for __tmp_var_2 in range(len(skn_)): skn[__tmp_var_2] = stakey(_tmparr_skn[__tmp_var_2])\n if _copyback_skx:\n for __tmp_var_1 in range(len(skx_)): skx[__tmp_var_1] = stakey(_tmparr_skx[__tmp_var_1])\n if _copyback_skc:\n for __tmp_var_0 in range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])\n _solsta_return_value = solsta(_solsta_return_value)\n _prosta_return_value = prosta(_prosta_return_value)\n return _prosta_return_value,_solsta_return_value", "def getprosta(self,whichsol_):\n prosta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getprosta(self.__nativep,whichsol_,ctypes.byref(prosta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _prosta_return_value = prosta(prosta_.value)\n return (_prosta_return_value)", "def getsolutioni(self,accmode_,i_,whichsol_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioni(accmode_,i_,whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value = resargs\n _sk_return_value = stakey(_sk_return_value)\n return _sk_return_value,_x_return_value,_sl_return_value,_su_return_value,_sn_return_value", "def pk_cs(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n # print(self.pk.shape)\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex)\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True)\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n objective = cp.Minimize(cp.pnorm(x_cvx, p=1))\n 
constraints = [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon]#[H*x == pm]\n # Create the problem and solve\n problem = cp.Problem(objective, constraints)\n problem.solve(solver=cp.SCS, verbose=True) \n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()\n return self.pk", "def getinfeasiblesubproblem(self,whichsol_):\n inftask_ = ctypes.c_void_p()\n res = __library__.MSK_XX_getinfeasiblesubproblem(self.__nativep,whichsol_,ctypes.byref(inftask_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _inftask_return_value = Task(nativep = inftask_)\n return (_inftask_return_value)", "def getskcslice(self,whichsol_,first_,last_,skc_):\n _skc_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),((last_) - (first_))))\n if isinstance(skc_,numpy.ndarray) and not skc_.flags.writeable:\n raise ValueError(\"Argument skc must be writable\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))()\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_getskcslice(self.__nativep,whichsol_,first_,last_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skc_ is not None: skc_[:] = [ stakey(v) for v in _skc_tmp[0:len(skc_)] ]", "def getsolsta(self,whichsol_):\n solsta_ = ctypes.c_int32()\n res = __library__.MSK_XX_getsolsta(self.__nativep,whichsol_,ctypes.byref(solsta_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _solsta_return_value = solsta(solsta_.value)\n return (_solsta_return_value)", "def pk_constrained(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex) # cvxpy does not accept floats, apparently\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True) # create x variable\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n problem = cp.Problem(cp.Minimize(cp.norm2(x_cvx)**2),\n [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon])\n problem.solve(solver=cp.SCS, verbose=False)\n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()", "def getskcslice(self,whichsol_,first_,last_,skc): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skc = False\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n _copyback_skc = True\n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n _copyback_skc = True\n if skc_ is not None and len(skc_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skc has wrong length\")\n res = self.__obj.getskcslice(whichsol_,first_,last_,skc_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skc:\n for __tmp_var_0 in 
range(len(skc_)): skc[__tmp_var_0] = stakey(_tmparr_skc[__tmp_var_0])", "def getclskey(cls, tmpcls, op, slot):\n return cls.getClsStagePri(tmpcls, op, slot)", "def putconsolutioni(self,i_,whichsol_,sk_,x_,sl_,su_):\n res = __library__.MSK_XX_putconsolutioni(self.__nativep,i_,whichsol_,sk_,x_,sl_,su_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getskxslice(self,whichsol_,first_,last_,skx_):\n _skx_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx is not long enough: Is %d, expected %d\" % (len(skx_),((last_) - (first_))))\n if isinstance(skx_,numpy.ndarray) and not skx_.flags.writeable:\n raise ValueError(\"Argument skx must be writable\")\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))()\n else:\n _skx_tmp = None\n res = __library__.MSK_XX_getskxslice(self.__nativep,whichsol_,first_,last_,_skx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if skx_ is not None: skx_[:] = [ stakey(v) for v in _skx_tmp[0:len(skx_)] ]", "def getsnx(self,whichsol_,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if snx is None: raise TypeError(\"Invalid type for argument snx\")\n _copyback_snx = False\n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n _copyback_snx = True\n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n _copyback_snx = True\n if snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx has wrong length\")\n res = self.__obj.getsnx(whichsol_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_snx:\n snx[:] = _tmparr_snx", "def spkrinfo(path, istrain):\n with open(path) as fp:\n spkrt = {}\n ii = 0 # for label\n for line in fp:\n if line[0] != ';': # ignore header\n line = line.rstrip().split()\n sid, train = line[0], line[3].upper() == 'TRN'\n if not istrain ^ train:\n spkrt[sid] = ii\n ii += 1\n return spkrt", "def putsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n \n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n \n if xc is None:\n xc_ = None\n else:\n 
try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n \n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n \n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n \n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n \n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n \n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n \n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n \n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n \n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n \n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n \n if snx is None:\n snx_ = None\n else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n \n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n \n res = self.__obj.putsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def getsnx(self,whichsol_,snx_):\n _snx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and snx_ is not None and len(snx_) != self.getnumvar():\n raise ValueError(\"Array argument snx is not long enough: Is %d, expected %d\" % (len(snx_),self.getnumvar()))\n if isinstance(snx_,numpy.ndarray) and not snx_.flags.writeable:\n raise ValueError(\"Argument snx must be writable\")\n if snx_ is None:\n raise ValueError(\"Argument snx may not be None\")\n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = 
numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_getsnx(self.__nativep,whichsol_,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _snx_copyarray:\n snx_[:] = _snx_np_tmp", "def getskxslice(self,whichsol_,first_,last_,skx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n _copyback_skx = False\n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n _copyback_skx = True\n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n _copyback_skx = True\n if skx_ is not None and len(skx_) != ((last_) - (first_)):\n raise ValueError(\"Array argument skx has wrong length\")\n res = self.__obj.getskxslice(whichsol_,first_,last_,skx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_skx:\n for __tmp_var_0 in range(len(skx_)): skx[__tmp_var_0] = stakey(_tmparr_skx[__tmp_var_0])", "def getslc(self,whichsol_,slc_):\n _slc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and slc_ is not None and len(slc_) != self.getnumcon():\n raise ValueError(\"Array argument slc is not long enough: Is %d, expected %d\" % (len(slc_),self.getnumcon()))\n if isinstance(slc_,numpy.ndarray) and not slc_.flags.writeable:\n raise ValueError(\"Argument slc must be writable\")\n if slc_ is None:\n raise ValueError(\"Argument slc may not be None\")\n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n res = __library__.MSK_XX_getslc(self.__nativep,whichsol_,_slc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _slc_copyarray:\n slc_[:] = _slc_np_tmp", "def stereogenic_bond_keys(gra, idx_dct=None, assigned=False):\n # Don't recalculate symmetry classes unless we have to\n idx_dct = class_indices(gra) if idx_dct is None else idx_dct\n ste_bnd_keys = _stereogenic_bond_keys(gra, idx_dct, assigned=assigned)\n return ste_bnd_keys", "def getsolutioninfo(self,whichsol_):\n pobj_ = ctypes.c_double()\n pviolcon_ = ctypes.c_double()\n pviolvar_ = ctypes.c_double()\n pviolbarvar_ = ctypes.c_double()\n pviolcone_ = ctypes.c_double()\n pviolitg_ = ctypes.c_double()\n dobj_ = ctypes.c_double()\n dviolcon_ = ctypes.c_double()\n dviolvar_ = ctypes.c_double()\n dviolbarvar_ = ctypes.c_double()\n dviolcone_ = ctypes.c_double()\n res = 
__library__.MSK_XX_getsolutioninfo(self.__nativep,whichsol_,ctypes.byref(pobj_),ctypes.byref(pviolcon_),ctypes.byref(pviolvar_),ctypes.byref(pviolbarvar_),ctypes.byref(pviolcone_),ctypes.byref(pviolitg_),ctypes.byref(dobj_),ctypes.byref(dviolcon_),ctypes.byref(dviolvar_),ctypes.byref(dviolbarvar_),ctypes.byref(dviolcone_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n pobj_ = pobj_.value\n _pobj_return_value = pobj_\n pviolcon_ = pviolcon_.value\n _pviolcon_return_value = pviolcon_\n pviolvar_ = pviolvar_.value\n _pviolvar_return_value = pviolvar_\n pviolbarvar_ = pviolbarvar_.value\n _pviolbarvar_return_value = pviolbarvar_\n pviolcone_ = pviolcone_.value\n _pviolcone_return_value = pviolcone_\n pviolitg_ = pviolitg_.value\n _pviolitg_return_value = pviolitg_\n dobj_ = dobj_.value\n _dobj_return_value = dobj_\n dviolcon_ = dviolcon_.value\n _dviolcon_return_value = dviolcon_\n dviolvar_ = dviolvar_.value\n _dviolvar_return_value = dviolvar_\n dviolbarvar_ = dviolbarvar_.value\n _dviolbarvar_return_value = dviolbarvar_\n dviolcone_ = dviolcone_.value\n _dviolcone_return_value = dviolcone_\n return (_pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value)", "def putskc(self,whichsol_,skc_):\n _skc_minlength = self.getnumcon()\n if self.getnumcon() > 0 and skc_ is not None and len(skc_) != self.getnumcon():\n raise ValueError(\"Array argument skc is not long enough: Is %d, expected %d\" % (len(skc_),self.getnumcon()))\n if skc_ is None:\n raise ValueError(\"Argument skc cannot be None\")\n if skc_ is None:\n raise ValueError(\"Argument skc may not be None\")\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n res = __library__.MSK_XX_putskc(self.__nativep,whichsol_,_skc_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_next_conf_keys(self):\n C_List = []\n for key in self.Poss_Tree:\n key_c = int(str(key)[-1])\n for choice in self.Poss_Tree[key]:\n if choice == key_c:\n C_List.append(int(construct_pass(key, choice)))\n return C_List", "def get_sys_index(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetSysIndex', self.handle)", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def pick_penalty(table, wtype, bpart, rank):\n try:\n return table[wtype][bpart][rank]\n\n except KeyError as error:\n return 'key_error', error.args[0]\n\n except IndexError as error:\n return 'index_error', error.args[0]" ]
[ "0.6662831", "0.6497118", "0.6283685", "0.6198206", "0.58799136", "0.5795298", "0.5409666", "0.53260463", "0.5285261", "0.5234887", "0.5229472", "0.5209229", "0.5130687", "0.5127639", "0.50948286", "0.503141", "0.49997705", "0.49190468", "0.49169722", "0.4883842", "0.4841338", "0.48387", "0.48210257", "0.4803247", "0.47723368", "0.47702193", "0.4769802", "0.47479364", "0.47379974", "0.4730783" ]
0.76341337
0